/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
 */
35 #include <linux/init.h>
37 #include <linux/module.h>
38 #include <linux/device.h>
39 #include <linux/err.h>
40 #include <linux/poll.h>
41 #include <linux/file.h>
42 #include <linux/mount.h>
43 #include <linux/cdev.h>
45 #include <asm/uaccess.h>
49 MODULE_AUTHOR("Libor Michalek");
50 MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
51 MODULE_LICENSE("Dual BSD/GPL");
53 static int ucm_debug_level
;
55 module_param_named(debug_level
, ucm_debug_level
, int, 0644);
56 MODULE_PARM_DESC(debug_level
, "Enable debug tracing if > 0");
63 #define IB_UCM_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_MINOR)
67 #define ucm_dbg(format, arg...) \
69 if (ucm_debug_level > 0) \
70 printk(KERN_DEBUG PFX format, ## arg); \
73 static struct semaphore ctx_id_mutex
;
74 static struct idr ctx_id_table
;
76 static struct ib_ucm_context
*ib_ucm_ctx_get(struct ib_ucm_file
*file
, int id
)
78 struct ib_ucm_context
*ctx
;
81 ctx
= idr_find(&ctx_id_table
, id
);
83 ctx
= ERR_PTR(-ENOENT
);
84 else if (ctx
->file
!= file
)
85 ctx
= ERR_PTR(-EINVAL
);
87 atomic_inc(&ctx
->ref
);
93 static void ib_ucm_ctx_put(struct ib_ucm_context
*ctx
)
95 if (atomic_dec_and_test(&ctx
->ref
))
99 static inline int ib_ucm_new_cm_id(int event
)
101 return event
== IB_CM_REQ_RECEIVED
|| event
== IB_CM_SIDR_REQ_RECEIVED
;
104 static void ib_ucm_cleanup_events(struct ib_ucm_context
*ctx
)
106 struct ib_ucm_event
*uevent
;
108 down(&ctx
->file
->mutex
);
109 list_del(&ctx
->file_list
);
110 while (!list_empty(&ctx
->events
)) {
112 uevent
= list_entry(ctx
->events
.next
,
113 struct ib_ucm_event
, ctx_list
);
114 list_del(&uevent
->file_list
);
115 list_del(&uevent
->ctx_list
);
117 /* clear incoming connections. */
118 if (ib_ucm_new_cm_id(uevent
->resp
.event
))
119 ib_destroy_cm_id(uevent
->cm_id
);
123 up(&ctx
->file
->mutex
);
126 static struct ib_ucm_context
*ib_ucm_ctx_alloc(struct ib_ucm_file
*file
)
128 struct ib_ucm_context
*ctx
;
131 ctx
= kmalloc(sizeof(*ctx
), GFP_KERNEL
);
135 memset(ctx
, 0, sizeof *ctx
);
136 atomic_set(&ctx
->ref
, 1);
137 init_waitqueue_head(&ctx
->wait
);
139 INIT_LIST_HEAD(&ctx
->events
);
142 result
= idr_pre_get(&ctx_id_table
, GFP_KERNEL
);
147 result
= idr_get_new(&ctx_id_table
, ctx
, &ctx
->id
);
149 } while (result
== -EAGAIN
);
154 list_add_tail(&ctx
->file_list
, &file
->ctxs
);
155 ucm_dbg("Allocated CM ID <%d>\n", ctx
->id
);
163 * Event portion of the API, handle CM events
164 * and allow event polling.
166 static void ib_ucm_event_path_get(struct ib_ucm_path_rec
*upath
,
167 struct ib_sa_path_rec
*kpath
)
169 if (!kpath
|| !upath
)
172 memcpy(upath
->dgid
, kpath
->dgid
.raw
, sizeof *upath
->dgid
);
173 memcpy(upath
->sgid
, kpath
->sgid
.raw
, sizeof *upath
->sgid
);
175 upath
->dlid
= kpath
->dlid
;
176 upath
->slid
= kpath
->slid
;
177 upath
->raw_traffic
= kpath
->raw_traffic
;
178 upath
->flow_label
= kpath
->flow_label
;
179 upath
->hop_limit
= kpath
->hop_limit
;
180 upath
->traffic_class
= kpath
->traffic_class
;
181 upath
->reversible
= kpath
->reversible
;
182 upath
->numb_path
= kpath
->numb_path
;
183 upath
->pkey
= kpath
->pkey
;
184 upath
->sl
= kpath
->sl
;
185 upath
->mtu_selector
= kpath
->mtu_selector
;
186 upath
->mtu
= kpath
->mtu
;
187 upath
->rate_selector
= kpath
->rate_selector
;
188 upath
->rate
= kpath
->rate
;
189 upath
->packet_life_time
= kpath
->packet_life_time
;
190 upath
->preference
= kpath
->preference
;
192 upath
->packet_life_time_selector
=
193 kpath
->packet_life_time_selector
;
196 static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp
*ureq
,
197 struct ib_cm_req_event_param
*kreq
)
199 ureq
->remote_ca_guid
= kreq
->remote_ca_guid
;
200 ureq
->remote_qkey
= kreq
->remote_qkey
;
201 ureq
->remote_qpn
= kreq
->remote_qpn
;
202 ureq
->qp_type
= kreq
->qp_type
;
203 ureq
->starting_psn
= kreq
->starting_psn
;
204 ureq
->responder_resources
= kreq
->responder_resources
;
205 ureq
->initiator_depth
= kreq
->initiator_depth
;
206 ureq
->local_cm_response_timeout
= kreq
->local_cm_response_timeout
;
207 ureq
->flow_control
= kreq
->flow_control
;
208 ureq
->remote_cm_response_timeout
= kreq
->remote_cm_response_timeout
;
209 ureq
->retry_count
= kreq
->retry_count
;
210 ureq
->rnr_retry_count
= kreq
->rnr_retry_count
;
211 ureq
->srq
= kreq
->srq
;
213 ib_ucm_event_path_get(&ureq
->primary_path
, kreq
->primary_path
);
214 ib_ucm_event_path_get(&ureq
->alternate_path
, kreq
->alternate_path
);
217 static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp
*urep
,
218 struct ib_cm_rep_event_param
*krep
)
220 urep
->remote_ca_guid
= krep
->remote_ca_guid
;
221 urep
->remote_qkey
= krep
->remote_qkey
;
222 urep
->remote_qpn
= krep
->remote_qpn
;
223 urep
->starting_psn
= krep
->starting_psn
;
224 urep
->responder_resources
= krep
->responder_resources
;
225 urep
->initiator_depth
= krep
->initiator_depth
;
226 urep
->target_ack_delay
= krep
->target_ack_delay
;
227 urep
->failover_accepted
= krep
->failover_accepted
;
228 urep
->flow_control
= krep
->flow_control
;
229 urep
->rnr_retry_count
= krep
->rnr_retry_count
;
230 urep
->srq
= krep
->srq
;
233 static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp
*urep
,
234 struct ib_cm_sidr_rep_event_param
*krep
)
236 urep
->status
= krep
->status
;
237 urep
->qkey
= krep
->qkey
;
238 urep
->qpn
= krep
->qpn
;
241 static int ib_ucm_event_process(struct ib_cm_event
*evt
,
242 struct ib_ucm_event
*uvt
)
246 switch (evt
->event
) {
247 case IB_CM_REQ_RECEIVED
:
248 ib_ucm_event_req_get(&uvt
->resp
.u
.req_resp
,
249 &evt
->param
.req_rcvd
);
250 uvt
->data_len
= IB_CM_REQ_PRIVATE_DATA_SIZE
;
251 uvt
->resp
.present
= IB_UCM_PRES_PRIMARY
;
252 uvt
->resp
.present
|= (evt
->param
.req_rcvd
.alternate_path
?
253 IB_UCM_PRES_ALTERNATE
: 0);
255 case IB_CM_REP_RECEIVED
:
256 ib_ucm_event_rep_get(&uvt
->resp
.u
.rep_resp
,
257 &evt
->param
.rep_rcvd
);
258 uvt
->data_len
= IB_CM_REP_PRIVATE_DATA_SIZE
;
260 case IB_CM_RTU_RECEIVED
:
261 uvt
->data_len
= IB_CM_RTU_PRIVATE_DATA_SIZE
;
262 uvt
->resp
.u
.send_status
= evt
->param
.send_status
;
264 case IB_CM_DREQ_RECEIVED
:
265 uvt
->data_len
= IB_CM_DREQ_PRIVATE_DATA_SIZE
;
266 uvt
->resp
.u
.send_status
= evt
->param
.send_status
;
268 case IB_CM_DREP_RECEIVED
:
269 uvt
->data_len
= IB_CM_DREP_PRIVATE_DATA_SIZE
;
270 uvt
->resp
.u
.send_status
= evt
->param
.send_status
;
272 case IB_CM_MRA_RECEIVED
:
273 uvt
->resp
.u
.mra_resp
.timeout
=
274 evt
->param
.mra_rcvd
.service_timeout
;
275 uvt
->data_len
= IB_CM_MRA_PRIVATE_DATA_SIZE
;
277 case IB_CM_REJ_RECEIVED
:
278 uvt
->resp
.u
.rej_resp
.reason
= evt
->param
.rej_rcvd
.reason
;
279 uvt
->data_len
= IB_CM_REJ_PRIVATE_DATA_SIZE
;
280 uvt
->info_len
= evt
->param
.rej_rcvd
.ari_length
;
281 info
= evt
->param
.rej_rcvd
.ari
;
283 case IB_CM_LAP_RECEIVED
:
284 ib_ucm_event_path_get(&uvt
->resp
.u
.lap_resp
.path
,
285 evt
->param
.lap_rcvd
.alternate_path
);
286 uvt
->data_len
= IB_CM_LAP_PRIVATE_DATA_SIZE
;
287 uvt
->resp
.present
= IB_UCM_PRES_ALTERNATE
;
289 case IB_CM_APR_RECEIVED
:
290 uvt
->resp
.u
.apr_resp
.status
= evt
->param
.apr_rcvd
.ap_status
;
291 uvt
->data_len
= IB_CM_APR_PRIVATE_DATA_SIZE
;
292 uvt
->info_len
= evt
->param
.apr_rcvd
.info_len
;
293 info
= evt
->param
.apr_rcvd
.apr_info
;
295 case IB_CM_SIDR_REQ_RECEIVED
:
296 uvt
->resp
.u
.sidr_req_resp
.pkey
=
297 evt
->param
.sidr_req_rcvd
.pkey
;
298 uvt
->data_len
= IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE
;
300 case IB_CM_SIDR_REP_RECEIVED
:
301 ib_ucm_event_sidr_rep_get(&uvt
->resp
.u
.sidr_rep_resp
,
302 &evt
->param
.sidr_rep_rcvd
);
303 uvt
->data_len
= IB_CM_SIDR_REP_PRIVATE_DATA_SIZE
;
304 uvt
->info_len
= evt
->param
.sidr_rep_rcvd
.info_len
;
305 info
= evt
->param
.sidr_rep_rcvd
.info
;
308 uvt
->resp
.u
.send_status
= evt
->param
.send_status
;
313 uvt
->data
= kmalloc(uvt
->data_len
, GFP_KERNEL
);
317 memcpy(uvt
->data
, evt
->private_data
, uvt
->data_len
);
318 uvt
->resp
.present
|= IB_UCM_PRES_DATA
;
322 uvt
->info
= kmalloc(uvt
->info_len
, GFP_KERNEL
);
326 memcpy(uvt
->info
, info
, uvt
->info_len
);
327 uvt
->resp
.present
|= IB_UCM_PRES_INFO
;
337 static int ib_ucm_event_handler(struct ib_cm_id
*cm_id
,
338 struct ib_cm_event
*event
)
340 struct ib_ucm_event
*uevent
;
341 struct ib_ucm_context
*ctx
;
344 ctx
= cm_id
->context
;
346 uevent
= kmalloc(sizeof(*uevent
), GFP_KERNEL
);
350 memset(uevent
, 0, sizeof(*uevent
));
352 uevent
->cm_id
= cm_id
;
353 uevent
->resp
.uid
= ctx
->uid
;
354 uevent
->resp
.id
= ctx
->id
;
355 uevent
->resp
.event
= event
->event
;
357 result
= ib_ucm_event_process(event
, uevent
);
361 down(&ctx
->file
->mutex
);
362 list_add_tail(&uevent
->file_list
, &ctx
->file
->events
);
363 list_add_tail(&uevent
->ctx_list
, &ctx
->events
);
364 wake_up_interruptible(&ctx
->file
->poll_wait
);
365 up(&ctx
->file
->mutex
);
371 /* Destroy new cm_id's */
372 return ib_ucm_new_cm_id(event
->event
);
375 static ssize_t
ib_ucm_event(struct ib_ucm_file
*file
,
376 const char __user
*inbuf
,
377 int in_len
, int out_len
)
379 struct ib_ucm_context
*ctx
;
380 struct ib_ucm_event_get cmd
;
381 struct ib_ucm_event
*uevent
;
385 if (out_len
< sizeof(struct ib_ucm_event_resp
))
388 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
394 while (list_empty(&file
->events
)) {
396 if (file
->filp
->f_flags
& O_NONBLOCK
) {
401 if (signal_pending(current
)) {
402 result
= -ERESTARTSYS
;
406 prepare_to_wait(&file
->poll_wait
, &wait
, TASK_INTERRUPTIBLE
);
412 finish_wait(&file
->poll_wait
, &wait
);
418 uevent
= list_entry(file
->events
.next
, struct ib_ucm_event
, file_list
);
420 if (ib_ucm_new_cm_id(uevent
->resp
.event
)) {
421 ctx
= ib_ucm_ctx_alloc(file
);
427 ctx
->cm_id
= uevent
->cm_id
;
428 ctx
->cm_id
->context
= ctx
;
429 uevent
->resp
.id
= ctx
->id
;
432 if (copy_to_user((void __user
*)(unsigned long)cmd
.response
,
433 &uevent
->resp
, sizeof(uevent
->resp
))) {
439 if (cmd
.data_len
< uevent
->data_len
) {
443 if (copy_to_user((void __user
*)(unsigned long)cmd
.data
,
444 uevent
->data
, uevent
->data_len
)) {
451 if (cmd
.info_len
< uevent
->info_len
) {
455 if (copy_to_user((void __user
*)(unsigned long)cmd
.info
,
456 uevent
->info
, uevent
->info_len
)) {
462 list_del(&uevent
->file_list
);
463 list_del(&uevent
->ctx_list
);
464 uevent
->ctx
->events_reported
++;
475 static ssize_t
ib_ucm_create_id(struct ib_ucm_file
*file
,
476 const char __user
*inbuf
,
477 int in_len
, int out_len
)
479 struct ib_ucm_create_id cmd
;
480 struct ib_ucm_create_id_resp resp
;
481 struct ib_ucm_context
*ctx
;
484 if (out_len
< sizeof(resp
))
487 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
491 ctx
= ib_ucm_ctx_alloc(file
);
497 ctx
->cm_id
= ib_create_cm_id(ib_ucm_event_handler
, ctx
);
498 if (IS_ERR(ctx
->cm_id
)) {
499 result
= PTR_ERR(ctx
->cm_id
);
504 if (copy_to_user((void __user
*)(unsigned long)cmd
.response
,
505 &resp
, sizeof(resp
))) {
514 idr_remove(&ctx_id_table
, ctx
->id
);
517 if (!IS_ERR(ctx
->cm_id
))
518 ib_destroy_cm_id(ctx
->cm_id
);
524 static ssize_t
ib_ucm_destroy_id(struct ib_ucm_file
*file
,
525 const char __user
*inbuf
,
526 int in_len
, int out_len
)
528 struct ib_ucm_destroy_id cmd
;
529 struct ib_ucm_destroy_id_resp resp
;
530 struct ib_ucm_context
*ctx
;
533 if (out_len
< sizeof(resp
))
536 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
540 ctx
= idr_find(&ctx_id_table
, cmd
.id
);
542 ctx
= ERR_PTR(-ENOENT
);
543 else if (ctx
->file
!= file
)
544 ctx
= ERR_PTR(-EINVAL
);
546 idr_remove(&ctx_id_table
, ctx
->id
);
552 atomic_dec(&ctx
->ref
);
553 wait_event(ctx
->wait
, !atomic_read(&ctx
->ref
));
555 /* No new events will be generated after destroying the cm_id. */
556 ib_destroy_cm_id(ctx
->cm_id
);
557 /* Cleanup events not yet reported to the user. */
558 ib_ucm_cleanup_events(ctx
);
560 resp
.events_reported
= ctx
->events_reported
;
561 if (copy_to_user((void __user
*)(unsigned long)cmd
.response
,
562 &resp
, sizeof(resp
)))
569 static ssize_t
ib_ucm_attr_id(struct ib_ucm_file
*file
,
570 const char __user
*inbuf
,
571 int in_len
, int out_len
)
573 struct ib_ucm_attr_id_resp resp
;
574 struct ib_ucm_attr_id cmd
;
575 struct ib_ucm_context
*ctx
;
578 if (out_len
< sizeof(resp
))
581 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
584 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
588 resp
.service_id
= ctx
->cm_id
->service_id
;
589 resp
.service_mask
= ctx
->cm_id
->service_mask
;
590 resp
.local_id
= ctx
->cm_id
->local_id
;
591 resp
.remote_id
= ctx
->cm_id
->remote_id
;
593 if (copy_to_user((void __user
*)(unsigned long)cmd
.response
,
594 &resp
, sizeof(resp
)))
601 static void ib_ucm_copy_ah_attr(struct ib_ucm_ah_attr
*dest_attr
,
602 struct ib_ah_attr
*src_attr
)
604 memcpy(dest_attr
->grh_dgid
, src_attr
->grh
.dgid
.raw
,
605 sizeof src_attr
->grh
.dgid
);
606 dest_attr
->grh_flow_label
= src_attr
->grh
.flow_label
;
607 dest_attr
->grh_sgid_index
= src_attr
->grh
.sgid_index
;
608 dest_attr
->grh_hop_limit
= src_attr
->grh
.hop_limit
;
609 dest_attr
->grh_traffic_class
= src_attr
->grh
.traffic_class
;
611 dest_attr
->dlid
= src_attr
->dlid
;
612 dest_attr
->sl
= src_attr
->sl
;
613 dest_attr
->src_path_bits
= src_attr
->src_path_bits
;
614 dest_attr
->static_rate
= src_attr
->static_rate
;
615 dest_attr
->is_global
= (src_attr
->ah_flags
& IB_AH_GRH
);
616 dest_attr
->port_num
= src_attr
->port_num
;
619 static void ib_ucm_copy_qp_attr(struct ib_ucm_init_qp_attr_resp
*dest_attr
,
620 struct ib_qp_attr
*src_attr
)
622 dest_attr
->cur_qp_state
= src_attr
->cur_qp_state
;
623 dest_attr
->path_mtu
= src_attr
->path_mtu
;
624 dest_attr
->path_mig_state
= src_attr
->path_mig_state
;
625 dest_attr
->qkey
= src_attr
->qkey
;
626 dest_attr
->rq_psn
= src_attr
->rq_psn
;
627 dest_attr
->sq_psn
= src_attr
->sq_psn
;
628 dest_attr
->dest_qp_num
= src_attr
->dest_qp_num
;
629 dest_attr
->qp_access_flags
= src_attr
->qp_access_flags
;
631 dest_attr
->max_send_wr
= src_attr
->cap
.max_send_wr
;
632 dest_attr
->max_recv_wr
= src_attr
->cap
.max_recv_wr
;
633 dest_attr
->max_send_sge
= src_attr
->cap
.max_send_sge
;
634 dest_attr
->max_recv_sge
= src_attr
->cap
.max_recv_sge
;
635 dest_attr
->max_inline_data
= src_attr
->cap
.max_inline_data
;
637 ib_ucm_copy_ah_attr(&dest_attr
->ah_attr
, &src_attr
->ah_attr
);
638 ib_ucm_copy_ah_attr(&dest_attr
->alt_ah_attr
, &src_attr
->alt_ah_attr
);
640 dest_attr
->pkey_index
= src_attr
->pkey_index
;
641 dest_attr
->alt_pkey_index
= src_attr
->alt_pkey_index
;
642 dest_attr
->en_sqd_async_notify
= src_attr
->en_sqd_async_notify
;
643 dest_attr
->sq_draining
= src_attr
->sq_draining
;
644 dest_attr
->max_rd_atomic
= src_attr
->max_rd_atomic
;
645 dest_attr
->max_dest_rd_atomic
= src_attr
->max_dest_rd_atomic
;
646 dest_attr
->min_rnr_timer
= src_attr
->min_rnr_timer
;
647 dest_attr
->port_num
= src_attr
->port_num
;
648 dest_attr
->timeout
= src_attr
->timeout
;
649 dest_attr
->retry_cnt
= src_attr
->retry_cnt
;
650 dest_attr
->rnr_retry
= src_attr
->rnr_retry
;
651 dest_attr
->alt_port_num
= src_attr
->alt_port_num
;
652 dest_attr
->alt_timeout
= src_attr
->alt_timeout
;
655 static ssize_t
ib_ucm_init_qp_attr(struct ib_ucm_file
*file
,
656 const char __user
*inbuf
,
657 int in_len
, int out_len
)
659 struct ib_ucm_init_qp_attr_resp resp
;
660 struct ib_ucm_init_qp_attr cmd
;
661 struct ib_ucm_context
*ctx
;
662 struct ib_qp_attr qp_attr
;
665 if (out_len
< sizeof(resp
))
668 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
671 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
675 resp
.qp_attr_mask
= 0;
676 memset(&qp_attr
, 0, sizeof qp_attr
);
677 qp_attr
.qp_state
= cmd
.qp_state
;
678 result
= ib_cm_init_qp_attr(ctx
->cm_id
, &qp_attr
, &resp
.qp_attr_mask
);
682 ib_ucm_copy_qp_attr(&resp
, &qp_attr
);
684 if (copy_to_user((void __user
*)(unsigned long)cmd
.response
,
685 &resp
, sizeof(resp
)))
693 static ssize_t
ib_ucm_listen(struct ib_ucm_file
*file
,
694 const char __user
*inbuf
,
695 int in_len
, int out_len
)
697 struct ib_ucm_listen cmd
;
698 struct ib_ucm_context
*ctx
;
701 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
704 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
708 result
= ib_cm_listen(ctx
->cm_id
, cmd
.service_id
, cmd
.service_mask
);
713 static ssize_t
ib_ucm_establish(struct ib_ucm_file
*file
,
714 const char __user
*inbuf
,
715 int in_len
, int out_len
)
717 struct ib_ucm_establish cmd
;
718 struct ib_ucm_context
*ctx
;
721 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
724 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
728 result
= ib_cm_establish(ctx
->cm_id
);
733 static int ib_ucm_alloc_data(const void **dest
, u64 src
, u32 len
)
742 data
= kmalloc(len
, GFP_KERNEL
);
746 if (copy_from_user(data
, (void __user
*)(unsigned long)src
, len
)) {
755 static int ib_ucm_path_get(struct ib_sa_path_rec
**path
, u64 src
)
757 struct ib_ucm_path_rec ucm_path
;
758 struct ib_sa_path_rec
*sa_path
;
765 sa_path
= kmalloc(sizeof(*sa_path
), GFP_KERNEL
);
769 if (copy_from_user(&ucm_path
, (void __user
*)(unsigned long)src
,
776 memcpy(sa_path
->dgid
.raw
, ucm_path
.dgid
, sizeof sa_path
->dgid
);
777 memcpy(sa_path
->sgid
.raw
, ucm_path
.sgid
, sizeof sa_path
->sgid
);
779 sa_path
->dlid
= ucm_path
.dlid
;
780 sa_path
->slid
= ucm_path
.slid
;
781 sa_path
->raw_traffic
= ucm_path
.raw_traffic
;
782 sa_path
->flow_label
= ucm_path
.flow_label
;
783 sa_path
->hop_limit
= ucm_path
.hop_limit
;
784 sa_path
->traffic_class
= ucm_path
.traffic_class
;
785 sa_path
->reversible
= ucm_path
.reversible
;
786 sa_path
->numb_path
= ucm_path
.numb_path
;
787 sa_path
->pkey
= ucm_path
.pkey
;
788 sa_path
->sl
= ucm_path
.sl
;
789 sa_path
->mtu_selector
= ucm_path
.mtu_selector
;
790 sa_path
->mtu
= ucm_path
.mtu
;
791 sa_path
->rate_selector
= ucm_path
.rate_selector
;
792 sa_path
->rate
= ucm_path
.rate
;
793 sa_path
->packet_life_time
= ucm_path
.packet_life_time
;
794 sa_path
->preference
= ucm_path
.preference
;
796 sa_path
->packet_life_time_selector
=
797 ucm_path
.packet_life_time_selector
;
803 static ssize_t
ib_ucm_send_req(struct ib_ucm_file
*file
,
804 const char __user
*inbuf
,
805 int in_len
, int out_len
)
807 struct ib_cm_req_param param
;
808 struct ib_ucm_context
*ctx
;
809 struct ib_ucm_req cmd
;
812 param
.private_data
= NULL
;
813 param
.primary_path
= NULL
;
814 param
.alternate_path
= NULL
;
816 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
819 result
= ib_ucm_alloc_data(¶m
.private_data
, cmd
.data
, cmd
.len
);
823 result
= ib_ucm_path_get(¶m
.primary_path
, cmd
.primary_path
);
827 result
= ib_ucm_path_get(¶m
.alternate_path
, cmd
.alternate_path
);
831 param
.private_data_len
= cmd
.len
;
832 param
.service_id
= cmd
.sid
;
833 param
.qp_num
= cmd
.qpn
;
834 param
.qp_type
= cmd
.qp_type
;
835 param
.starting_psn
= cmd
.psn
;
836 param
.peer_to_peer
= cmd
.peer_to_peer
;
837 param
.responder_resources
= cmd
.responder_resources
;
838 param
.initiator_depth
= cmd
.initiator_depth
;
839 param
.remote_cm_response_timeout
= cmd
.remote_cm_response_timeout
;
840 param
.flow_control
= cmd
.flow_control
;
841 param
.local_cm_response_timeout
= cmd
.local_cm_response_timeout
;
842 param
.retry_count
= cmd
.retry_count
;
843 param
.rnr_retry_count
= cmd
.rnr_retry_count
;
844 param
.max_cm_retries
= cmd
.max_cm_retries
;
847 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
849 result
= ib_send_cm_req(ctx
->cm_id
, ¶m
);
852 result
= PTR_ERR(ctx
);
855 kfree(param
.private_data
);
856 kfree(param
.primary_path
);
857 kfree(param
.alternate_path
);
861 static ssize_t
ib_ucm_send_rep(struct ib_ucm_file
*file
,
862 const char __user
*inbuf
,
863 int in_len
, int out_len
)
865 struct ib_cm_rep_param param
;
866 struct ib_ucm_context
*ctx
;
867 struct ib_ucm_rep cmd
;
870 param
.private_data
= NULL
;
872 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
875 result
= ib_ucm_alloc_data(¶m
.private_data
, cmd
.data
, cmd
.len
);
879 param
.qp_num
= cmd
.qpn
;
880 param
.starting_psn
= cmd
.psn
;
881 param
.private_data_len
= cmd
.len
;
882 param
.responder_resources
= cmd
.responder_resources
;
883 param
.initiator_depth
= cmd
.initiator_depth
;
884 param
.target_ack_delay
= cmd
.target_ack_delay
;
885 param
.failover_accepted
= cmd
.failover_accepted
;
886 param
.flow_control
= cmd
.flow_control
;
887 param
.rnr_retry_count
= cmd
.rnr_retry_count
;
890 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
893 result
= ib_send_cm_rep(ctx
->cm_id
, ¶m
);
896 result
= PTR_ERR(ctx
);
898 kfree(param
.private_data
);
902 static ssize_t
ib_ucm_send_private_data(struct ib_ucm_file
*file
,
903 const char __user
*inbuf
, int in_len
,
904 int (*func
)(struct ib_cm_id
*cm_id
,
905 const void *private_data
,
906 u8 private_data_len
))
908 struct ib_ucm_private_data cmd
;
909 struct ib_ucm_context
*ctx
;
910 const void *private_data
= NULL
;
913 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
916 result
= ib_ucm_alloc_data(&private_data
, cmd
.data
, cmd
.len
);
920 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
922 result
= func(ctx
->cm_id
, private_data
, cmd
.len
);
925 result
= PTR_ERR(ctx
);
931 static ssize_t
ib_ucm_send_rtu(struct ib_ucm_file
*file
,
932 const char __user
*inbuf
,
933 int in_len
, int out_len
)
935 return ib_ucm_send_private_data(file
, inbuf
, in_len
, ib_send_cm_rtu
);
938 static ssize_t
ib_ucm_send_dreq(struct ib_ucm_file
*file
,
939 const char __user
*inbuf
,
940 int in_len
, int out_len
)
942 return ib_ucm_send_private_data(file
, inbuf
, in_len
, ib_send_cm_dreq
);
945 static ssize_t
ib_ucm_send_drep(struct ib_ucm_file
*file
,
946 const char __user
*inbuf
,
947 int in_len
, int out_len
)
949 return ib_ucm_send_private_data(file
, inbuf
, in_len
, ib_send_cm_drep
);
952 static ssize_t
ib_ucm_send_info(struct ib_ucm_file
*file
,
953 const char __user
*inbuf
, int in_len
,
954 int (*func
)(struct ib_cm_id
*cm_id
,
961 struct ib_ucm_context
*ctx
;
962 struct ib_ucm_info cmd
;
963 const void *data
= NULL
;
964 const void *info
= NULL
;
967 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
970 result
= ib_ucm_alloc_data(&data
, cmd
.data
, cmd
.data_len
);
974 result
= ib_ucm_alloc_data(&info
, cmd
.info
, cmd
.info_len
);
978 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
980 result
= func(ctx
->cm_id
, cmd
.status
, info
, cmd
.info_len
,
984 result
= PTR_ERR(ctx
);
992 static ssize_t
ib_ucm_send_rej(struct ib_ucm_file
*file
,
993 const char __user
*inbuf
,
994 int in_len
, int out_len
)
996 return ib_ucm_send_info(file
, inbuf
, in_len
, (void *)ib_send_cm_rej
);
999 static ssize_t
ib_ucm_send_apr(struct ib_ucm_file
*file
,
1000 const char __user
*inbuf
,
1001 int in_len
, int out_len
)
1003 return ib_ucm_send_info(file
, inbuf
, in_len
, (void *)ib_send_cm_apr
);
1006 static ssize_t
ib_ucm_send_mra(struct ib_ucm_file
*file
,
1007 const char __user
*inbuf
,
1008 int in_len
, int out_len
)
1010 struct ib_ucm_context
*ctx
;
1011 struct ib_ucm_mra cmd
;
1012 const void *data
= NULL
;
1015 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
1018 result
= ib_ucm_alloc_data(&data
, cmd
.data
, cmd
.len
);
1022 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
1024 result
= ib_send_cm_mra(ctx
->cm_id
, cmd
.timeout
, data
, cmd
.len
);
1025 ib_ucm_ctx_put(ctx
);
1027 result
= PTR_ERR(ctx
);
1033 static ssize_t
ib_ucm_send_lap(struct ib_ucm_file
*file
,
1034 const char __user
*inbuf
,
1035 int in_len
, int out_len
)
1037 struct ib_ucm_context
*ctx
;
1038 struct ib_sa_path_rec
*path
= NULL
;
1039 struct ib_ucm_lap cmd
;
1040 const void *data
= NULL
;
1043 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
1046 result
= ib_ucm_alloc_data(&data
, cmd
.data
, cmd
.len
);
1050 result
= ib_ucm_path_get(&path
, cmd
.path
);
1054 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
1056 result
= ib_send_cm_lap(ctx
->cm_id
, path
, data
, cmd
.len
);
1057 ib_ucm_ctx_put(ctx
);
1059 result
= PTR_ERR(ctx
);
1067 static ssize_t
ib_ucm_send_sidr_req(struct ib_ucm_file
*file
,
1068 const char __user
*inbuf
,
1069 int in_len
, int out_len
)
1071 struct ib_cm_sidr_req_param param
;
1072 struct ib_ucm_context
*ctx
;
1073 struct ib_ucm_sidr_req cmd
;
1076 param
.private_data
= NULL
;
1079 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
1082 result
= ib_ucm_alloc_data(¶m
.private_data
, cmd
.data
, cmd
.len
);
1086 result
= ib_ucm_path_get(¶m
.path
, cmd
.path
);
1090 param
.private_data_len
= cmd
.len
;
1091 param
.service_id
= cmd
.sid
;
1092 param
.timeout_ms
= cmd
.timeout
;
1093 param
.max_cm_retries
= cmd
.max_cm_retries
;
1094 param
.pkey
= cmd
.pkey
;
1096 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
1098 result
= ib_send_cm_sidr_req(ctx
->cm_id
, ¶m
);
1099 ib_ucm_ctx_put(ctx
);
1101 result
= PTR_ERR(ctx
);
1104 kfree(param
.private_data
);
1109 static ssize_t
ib_ucm_send_sidr_rep(struct ib_ucm_file
*file
,
1110 const char __user
*inbuf
,
1111 int in_len
, int out_len
)
1113 struct ib_cm_sidr_rep_param param
;
1114 struct ib_ucm_sidr_rep cmd
;
1115 struct ib_ucm_context
*ctx
;
1120 if (copy_from_user(&cmd
, inbuf
, sizeof(cmd
)))
1123 result
= ib_ucm_alloc_data(¶m
.private_data
,
1124 cmd
.data
, cmd
.data_len
);
1128 result
= ib_ucm_alloc_data(¶m
.info
, cmd
.info
, cmd
.info_len
);
1132 param
.qp_num
= cmd
.qpn
;
1133 param
.qkey
= cmd
.qkey
;
1134 param
.status
= cmd
.status
;
1135 param
.info_length
= cmd
.info_len
;
1136 param
.private_data_len
= cmd
.data_len
;
1138 ctx
= ib_ucm_ctx_get(file
, cmd
.id
);
1140 result
= ib_send_cm_sidr_rep(ctx
->cm_id
, ¶m
);
1141 ib_ucm_ctx_put(ctx
);
1143 result
= PTR_ERR(ctx
);
1146 kfree(param
.private_data
);
1151 static ssize_t (*ucm_cmd_table
[])(struct ib_ucm_file
*file
,
1152 const char __user
*inbuf
,
1153 int in_len
, int out_len
) = {
1154 [IB_USER_CM_CMD_CREATE_ID
] = ib_ucm_create_id
,
1155 [IB_USER_CM_CMD_DESTROY_ID
] = ib_ucm_destroy_id
,
1156 [IB_USER_CM_CMD_ATTR_ID
] = ib_ucm_attr_id
,
1157 [IB_USER_CM_CMD_LISTEN
] = ib_ucm_listen
,
1158 [IB_USER_CM_CMD_ESTABLISH
] = ib_ucm_establish
,
1159 [IB_USER_CM_CMD_SEND_REQ
] = ib_ucm_send_req
,
1160 [IB_USER_CM_CMD_SEND_REP
] = ib_ucm_send_rep
,
1161 [IB_USER_CM_CMD_SEND_RTU
] = ib_ucm_send_rtu
,
1162 [IB_USER_CM_CMD_SEND_DREQ
] = ib_ucm_send_dreq
,
1163 [IB_USER_CM_CMD_SEND_DREP
] = ib_ucm_send_drep
,
1164 [IB_USER_CM_CMD_SEND_REJ
] = ib_ucm_send_rej
,
1165 [IB_USER_CM_CMD_SEND_MRA
] = ib_ucm_send_mra
,
1166 [IB_USER_CM_CMD_SEND_LAP
] = ib_ucm_send_lap
,
1167 [IB_USER_CM_CMD_SEND_APR
] = ib_ucm_send_apr
,
1168 [IB_USER_CM_CMD_SEND_SIDR_REQ
] = ib_ucm_send_sidr_req
,
1169 [IB_USER_CM_CMD_SEND_SIDR_REP
] = ib_ucm_send_sidr_rep
,
1170 [IB_USER_CM_CMD_EVENT
] = ib_ucm_event
,
1171 [IB_USER_CM_CMD_INIT_QP_ATTR
] = ib_ucm_init_qp_attr
,
1174 static ssize_t
ib_ucm_write(struct file
*filp
, const char __user
*buf
,
1175 size_t len
, loff_t
*pos
)
1177 struct ib_ucm_file
*file
= filp
->private_data
;
1178 struct ib_ucm_cmd_hdr hdr
;
1181 if (len
< sizeof(hdr
))
1184 if (copy_from_user(&hdr
, buf
, sizeof(hdr
)))
1187 ucm_dbg("Write. cmd <%d> in <%d> out <%d> len <%Zu>\n",
1188 hdr
.cmd
, hdr
.in
, hdr
.out
, len
);
1190 if (hdr
.cmd
< 0 || hdr
.cmd
>= ARRAY_SIZE(ucm_cmd_table
))
1193 if (hdr
.in
+ sizeof(hdr
) > len
)
1196 result
= ucm_cmd_table
[hdr
.cmd
](file
, buf
+ sizeof(hdr
),
1204 static unsigned int ib_ucm_poll(struct file
*filp
,
1205 struct poll_table_struct
*wait
)
1207 struct ib_ucm_file
*file
= filp
->private_data
;
1208 unsigned int mask
= 0;
1210 poll_wait(filp
, &file
->poll_wait
, wait
);
1212 if (!list_empty(&file
->events
))
1213 mask
= POLLIN
| POLLRDNORM
;
1218 static int ib_ucm_open(struct inode
*inode
, struct file
*filp
)
1220 struct ib_ucm_file
*file
;
1222 file
= kmalloc(sizeof(*file
), GFP_KERNEL
);
1226 INIT_LIST_HEAD(&file
->events
);
1227 INIT_LIST_HEAD(&file
->ctxs
);
1228 init_waitqueue_head(&file
->poll_wait
);
1230 init_MUTEX(&file
->mutex
);
1232 filp
->private_data
= file
;
1235 ucm_dbg("Created struct\n");
1240 static int ib_ucm_close(struct inode
*inode
, struct file
*filp
)
1242 struct ib_ucm_file
*file
= filp
->private_data
;
1243 struct ib_ucm_context
*ctx
;
1246 while (!list_empty(&file
->ctxs
)) {
1247 ctx
= list_entry(file
->ctxs
.next
,
1248 struct ib_ucm_context
, file_list
);
1251 down(&ctx_id_mutex
);
1252 idr_remove(&ctx_id_table
, ctx
->id
);
1255 ib_destroy_cm_id(ctx
->cm_id
);
1256 ib_ucm_cleanup_events(ctx
);
1266 static struct file_operations ib_ucm_fops
= {
1267 .owner
= THIS_MODULE
,
1268 .open
= ib_ucm_open
,
1269 .release
= ib_ucm_close
,
1270 .write
= ib_ucm_write
,
1271 .poll
= ib_ucm_poll
,
1275 static struct class *ib_ucm_class
;
1276 static struct cdev ib_ucm_cdev
;
1278 static int __init
ib_ucm_init(void)
1282 result
= register_chrdev_region(IB_UCM_DEV
, 1, "infiniband_cm");
1284 ucm_dbg("Error <%d> registering dev\n", result
);
1288 cdev_init(&ib_ucm_cdev
, &ib_ucm_fops
);
1290 result
= cdev_add(&ib_ucm_cdev
, IB_UCM_DEV
, 1);
1292 ucm_dbg("Error <%d> adding cdev\n", result
);
1296 ib_ucm_class
= class_create(THIS_MODULE
, "infiniband_cm");
1297 if (IS_ERR(ib_ucm_class
)) {
1298 result
= PTR_ERR(ib_ucm_class
);
1299 ucm_dbg("Error <%d> creating class\n", result
);
1303 class_device_create(ib_ucm_class
, IB_UCM_DEV
, NULL
, "ucm");
1305 idr_init(&ctx_id_table
);
1306 init_MUTEX(&ctx_id_mutex
);
1310 cdev_del(&ib_ucm_cdev
);
1312 unregister_chrdev_region(IB_UCM_DEV
, 1);
1317 static void __exit
ib_ucm_cleanup(void)
1319 class_device_destroy(ib_ucm_class
, IB_UCM_DEV
);
1320 class_destroy(ib_ucm_class
);
1321 cdev_del(&ib_ucm_cdev
);
1322 unregister_chrdev_region(IB_UCM_DEV
, 1);
1325 module_init(ib_ucm_init
);
1326 module_exit(ib_ucm_cleanup
);