/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2008 Cisco.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/semaphore.h>

#include <asm/uaccess.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
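/*
 * Illustrative userspace sketch (not part of this driver; error handling
 * omitted and the device path and management class are chosen arbitrarily):
 * the character devices registered below are typically consumed roughly
 * like this.  See Documentation/infiniband/user_mad.txt and
 * <rdma/ib_user_mad.h> for the authoritative ABI.
 *
 *	int fd = open("/dev/infiniband/umad0", O_RDWR);
 *
 *	ioctl(fd, IB_USER_MAD_ENABLE_PKEY);	// opt in to the pkey_index ABI
 *
 *	struct ib_user_mad_reg_req req = {
 *		.qpn			= 1,	// must be 0 (SMI) or 1 (GSI)
 *		.mgmt_class		= 0x03,	// e.g. subnet administration
 *		.mgmt_class_version	= 2,
 *	};
 *	ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req);	// req.id returns the agent id
 *
 *	// Send: a struct ib_user_mad header (hdr.id = req.id, hdr.lid, ...)
 *	// immediately followed by the MAD itself.
 *	write(fd, umad_buf, umad_len);
 *
 *	// Receive: blocks (or use poll()) until a MAD for this agent arrives.
 *	read(fd, umad_buf, umad_len);
 */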
enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};
/*
 * Our lifetime rules for these structs are the following: each time a
 * device special file is opened, we take a reference on the
 * ib_umad_port's struct ib_umad_device.  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we drop the module's reference.
 */
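/*
 * Concretely (summarising the code below): ib_umad_open() and
 * ib_umad_sm_open() kref_get() umad_dev->ref; ib_umad_close(),
 * ib_umad_sm_close() and the open() error paths do the matching
 * kref_put(); ib_umad_release_dev() frees the ib_umad_device once the
 * last reference is gone.
 */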
struct ib_umad_port {
	struct cdev            cdev;
	struct device	      *dev;

	struct cdev            sm_cdev;
	struct device	      *sm_dev;
	struct semaphore       sm_sem;

	struct mutex	       file_mutex;
	struct list_head       file_list;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	int                    dev_num;
	u8                     port_num;
};
struct ib_umad_device {
	int                  start_port, end_port;
	struct kref          ref;
	struct ib_umad_port  port[0];
};
struct ib_umad_file {
	struct mutex		mutex;
	struct ib_umad_port    *port;
	struct list_head	recv_list;
	struct list_head	send_list;
	struct list_head	port_list;
	spinlock_t		send_lock;
	wait_queue_head_t	recv_wait;
	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
	int			agents_dead;
	u8			use_pkey_index;
	u8			already_used;
};
struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct ib_mad_recv_wc  *recv_wc;
	struct list_head	list;
	int			length;
	struct ib_user_mad	mad;
};
static struct class *umad_class;

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
static void ib_umad_release_dev(struct kref *ref)
{
	struct ib_umad_device *dev =
		container_of(ref, struct ib_umad_device, ref);

	kfree(dev);
}
static int hdr_size(struct ib_umad_file *file)
{
	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
				      sizeof (struct ib_user_mad_hdr_old);
}
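/*
 * Note on the two header sizes above: a process that has issued the
 * IB_USER_MAD_ENABLE_PKEY ioctl gets the newer struct ib_user_mad_hdr from
 * <rdma/ib_user_mad.h> (the main difference being the pkey_index field);
 * everything else keeps the smaller ib_user_mad_hdr_old layout for
 * backwards compatibility.
 */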
/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}
/* Returns 0 if the packet was queued for the matching agent, 1 otherwise. */
static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	mutex_lock(&file->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	mutex_unlock(&file->mutex);

	return ret;
}
static void dequeue_send(struct ib_umad_file *file,
			 struct ib_umad_packet *packet)
{
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
}
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	dequeue_send(file, packet);
	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		/*
		 * Surface the timeout to userspace as a received MAD with
		 * status ETIMEDOUT; queue_packet() takes ownership of the
		 * packet on success.
		 */
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;
	}
	kfree(packet);
}
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	packet->length  = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status	    = 0;
	packet->mad.hdr.length	    = hdr_size(file) + mad_recv_wc->mad_len;
	packet->mad.hdr.qpn	    = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid	    = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl	    = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits   = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index  = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		struct ib_ah_attr ah_attr;

		ib_init_ah_from_wc(agent->device, agent->port_num,
				   mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
				   &ah_attr);

		packet->mad.hdr.gid_index     = ah_attr.grh.sgid_index;
		packet->mad.hdr.hop_limit     = ah_attr.grh.hop_limit;
		packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
		memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
		packet->mad.hdr.flow_label    = cpu_to_be32(ah_attr.grh.flow_label);
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}
static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;

	/* We need enough room to copy the first (or only) MAD segment. */
	recv_buf = &packet->recv_wc->recv_buf;
	if ((packet->length <= sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + sizeof (*recv_buf->mad)))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message. Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < hdr_size(file) + packet->length) {
			/*
			 * The buffer is too small, return the first RMPP segment,
			 * which includes the RMPP message length.
			 */
			return -ENOSPC;
		}
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = sizeof (struct ib_mad) - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
					 seg_payload))
				return -EFAULT;
		}
	}
	return hdr_size(file) + packet->length;
}
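/*
 * Summary of the contract implemented above: a successful read() returns
 * hdr_size(file) + packet->length.  For a multi-segment RMPP message that
 * does not fit in the caller's buffer, the header and first segment (which
 * carry the total length) are copied and the call then fails, so userspace
 * can size a larger buffer and read the still-queued packet again.
 */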
static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	ssize_t size = hdr_size(file) + packet->length;

	if (count < size)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	if (copy_to_user(buf, packet->mad.data, packet->length))
		return -EFAULT;

	return size;
}
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < hdr_size(file))
		return -EINVAL;

	mutex_lock(&file->mutex);

	while (list_empty(&file->recv_list)) {
		mutex_unlock(&file->mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mutex);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	mutex_unlock(&file->mutex);

	if (packet->recv_wc)
		ret = copy_recv_mad(file, buf, packet, count);
	else
		ret = copy_send_mad(file, buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		mutex_lock(&file->mutex);
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
	} else {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}
static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int left, seg;

	/* Copy class specific header */
	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place.  Copy data segments. */
	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
			return -EFAULT;
	}
	return 0;
}
static int same_destination(struct ib_user_mad_hdr *hdr1,
			    struct ib_user_mad_hdr *hdr2)
{
	if (!hdr1->grh_present && !hdr2->grh_present)
		return (hdr1->lid == hdr2->lid);

	if (hdr1->grh_present && hdr2->grh_present)
		return !memcmp(hdr1->gid, hdr2->gid, 16);

	return 0;
}
static int is_duplicate(struct ib_umad_file *file,
			struct ib_umad_packet *packet)
{
	struct ib_umad_packet *sent_packet;
	struct ib_mad_hdr *sent_hdr, *hdr;

	hdr = (struct ib_mad_hdr *) packet->mad.data;
	list_for_each_entry(sent_packet, &file->send_list, list) {
		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;

		if ((hdr->tid != sent_hdr->tid) ||
		    (hdr->mgmt_class != sent_hdr->mgmt_class))
			continue;

		/*
		 * No need to be overly clever here.  If two new operations have
		 * the same TID, reject the second as a duplicate.  This is more
		 * restrictive than required by the spec.
		 */
		if (!ib_response_mad((struct ib_mad *) hdr)) {
			if (!ib_response_mad((struct ib_mad *) sent_hdr))
				return 1;
			continue;
		} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
			continue;

		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
			return 1;
	}

	return 0;
}
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;

	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id < 0 ||
	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	buf += hdr_size(file);

	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	mutex_lock(&file->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.sgid_index	  = packet->mad.hdr.gid_index;
		ah_attr.grh.flow_label	  = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit	  = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	} else {
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	}

	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah		= ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries	= packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

	if (!rmpp_active) {
		if (copy_from_user(packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * Set the high-order part of the transaction ID to make MADs from
	 * different agents unique, and allow routing responses back to the
	 * original requestor.
	 */
	if (!ib_response_mad(packet->msg->mad)) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
		rmpp_mad->mad_hdr.tid = *tid;
	}

	spin_lock_irq(&file->send_lock);
	ret = is_duplicate(file, packet);
	if (!ret)
		list_add_tail(&packet->list, &file->send_list);
	spin_unlock_irq(&file->send_lock);
	if (ret) {
		ret = -EINVAL;
		goto err_msg;
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_send;

	mutex_unlock(&file->mutex);
	return count;

err_send:
	dequeue_send(file, packet);
err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	ib_destroy_ah(ah);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
	return ret;
}
static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.oui, ureq.oui, sizeof req.oui);

		if (compat_method_mask) {
			u32 *umm = (u32 *) ureq.method_mask;
			int i;

			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
				req.method_mask[i] =
					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		} else
			memcpy(req.method_mask, ureq.method_mask,
			       sizeof req.method_mask);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		if (!file->use_pkey_index) {
			printk(KERN_WARNING "user_mad: process %s did not enable "
			       "P_Key index support.\n", current->comm);
			printk(KERN_WARNING "user_mad: Documentation/infiniband/user_mad.txt "
			       "has info on the new ABI.\n");
		}
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
	struct ib_mad_agent *agent = NULL;
	int id;
	int ret = 0;

	if (get_user(id, arg))
		return -EFAULT;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	mutex_unlock(&file->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
static long ib_umad_enable_pkey(struct ib_umad_file *file)
{
	int ret = 0;

	mutex_lock(&file->mutex);
	if (file->already_used)
		ret = -EINVAL;
	else
		file->use_pkey_index = 1;
	mutex_unlock(&file->mutex);

	return ret;
}
static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
/*
 * ib_umad_open() does not need the BKL:
 *
 *  - the ib_umad_port structures are properly reference counted, and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - the ioctl method does not affect any global state outside of the
 *    file structure being operated on;
 */
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = 0;

	port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
	if (port)
		kref_get(&port->umad_dev->ref);
	else
		return -ENXIO;

	mutex_lock(&port->file_mutex);

	if (!port->ib_dev) {
		ret = -ENXIO;
		goto out;
	}

	file = kzalloc(sizeof *file, GFP_KERNEL);
	if (!file) {
		kref_put(&port->umad_dev->ref, ib_umad_release_dev);
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&file->mutex);
	spin_lock_init(&file->send_lock);
	INIT_LIST_HEAD(&file->recv_list);
	INIT_LIST_HEAD(&file->send_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

out:
	mutex_unlock(&port->file_mutex);
	return ret;
}
static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_device *dev = file->port->umad_dev;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}

	list_del(&file->port_list);

	mutex_unlock(&file->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	mutex_unlock(&file->port->file_mutex);

	kfree(file);
	kref_put(&dev->ref, ib_umad_release_dev);

	return 0;
}
static const struct file_operations umad_fops = {
	.owner		= THIS_MODULE,
	.read		= ib_umad_read,
	.write		= ib_umad_write,
	.poll		= ib_umad_poll,
	.unlocked_ioctl	= ib_umad_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ib_umad_compat_ioctl,
#endif
	.open		= ib_umad_open,
	.release	= ib_umad_close
};
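/*
 * The umad%d device above provides ordinary MAD send/receive access.  The
 * issm%d device handled below is different: opening it does no MAD I/O at
 * all, it just sets the IsSM capability bit on the port (via
 * ib_modify_port()) for as long as the file is held open, and only one
 * opener is admitted at a time (port->sm_sem).
 */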
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
	if (port)
		kref_get(&port->umad_dev->ref);
	else
		return -ENXIO;

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret) {
		up(&port->sm_sem);
		goto fail;
	}

	filp->private_data = port;

	return 0;

fail:
	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
	return ret;
}
static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	mutex_lock(&port->file_mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	mutex_unlock(&port->file_mutex);

	up(&port->sm_sem);

	kref_put(&port->umad_dev->ref, ib_umad_release_dev);

	return ret;
}
static const struct file_operations umad_sm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_umad_sm_open,
	.release = ib_umad_sm_close
};
static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_MAD_ABI_VERSION));
static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);

static int find_overflow_devnum(void)
{
	int ret;

	if (!overflow_maj) {
		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
					  "infiniband_mad");
		if (ret) {
			printk(KERN_ERR "user_mad: couldn't register dynamic device number\n");
			return ret;
		}
	}

	ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
	if (ret >= IB_UMAD_MAX_PORTS)
		return -1;

	return ret;
}
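/*
 * Device number allocation, in brief: the first IB_UMAD_MAX_PORTS ports are
 * numbered out of the statically registered base_dev region.  If more ports
 * show up, find_overflow_devnum() lazily allocates a second, dynamic char
 * device region and the extra ports are numbered from there, tracked in
 * overflow_map with port->dev_num offset by IB_UMAD_MAX_PORTS.
 */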
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_port *port)
{
	int devnum;
	dev_t base;

	spin_lock(&port_lock);
	devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (devnum >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&port_lock);
		devnum = find_overflow_devnum();
		if (devnum < 0)
			return -1;

		spin_lock(&port_lock);
		port->dev_num = devnum + IB_UMAD_MAX_PORTS;
		base = devnum + overflow_maj;
		set_bit(devnum, overflow_map);
	} else {
		port->dev_num = devnum;
		base = devnum + base_dev;
		set_bit(devnum, dev_map);
	}
	spin_unlock(&port_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	init_MUTEX(&port->sm_sem);
	mutex_init(&port->file_mutex);
	INIT_LIST_HEAD(&port->file_list);

	cdev_init(&port->cdev, &umad_fops);
	port->cdev.owner = THIS_MODULE;
	kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
	if (cdev_add(&port->cdev, base, 1))
		goto err_cdev;

	port->dev = device_create(umad_class, device->dma_device,
				  port->cdev.dev, port,
				  "umad%d", port->dev_num);
	if (IS_ERR(port->dev))
		goto err_cdev;

	if (device_create_file(port->dev, &dev_attr_ibdev))
		goto err_dev;
	if (device_create_file(port->dev, &dev_attr_port))
		goto err_dev;

	base += IB_UMAD_MAX_PORTS;
	cdev_init(&port->sm_cdev, &umad_sm_fops);
	port->sm_cdev.owner = THIS_MODULE;
	kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
	if (cdev_add(&port->sm_cdev, base, 1))
		goto err_sm_cdev;

	port->sm_dev = device_create(umad_class, device->dma_device,
				     port->sm_cdev.dev, port,
				     "issm%d", port->dev_num);
	if (IS_ERR(port->sm_dev))
		goto err_sm_cdev;

	if (device_create_file(port->sm_dev, &dev_attr_ibdev))
		goto err_sm_dev;
	if (device_create_file(port->sm_dev, &dev_attr_port))
		goto err_sm_dev;

	return 0;

err_sm_dev:
	device_destroy(umad_class, port->sm_cdev.dev);

err_sm_cdev:
	cdev_del(&port->sm_cdev);

err_dev:
	device_destroy(umad_class, port->cdev.dev);

err_cdev:
	cdev_del(&port->cdev);
	if (port->dev_num < IB_UMAD_MAX_PORTS)
		clear_bit(devnum, dev_map);
	else
		clear_bit(devnum, overflow_map);

	return -1;
}
static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int already_dead;
	int id;

	dev_set_drvdata(port->dev,    NULL);
	dev_set_drvdata(port->sm_dev, NULL);

	device_destroy(umad_class, port->cdev.dev);
	device_destroy(umad_class, port->sm_cdev.dev);

	cdev_del(&port->cdev);
	cdev_del(&port->sm_cdev);

	mutex_lock(&port->file_mutex);

	port->ib_dev = NULL;

	list_for_each_entry(file, &port->file_list, port_list) {
		mutex_lock(&file->mutex);
		already_dead = file->agents_dead;
		file->agents_dead = 1;
		mutex_unlock(&file->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);
	}

	mutex_unlock(&port->file_mutex);

	if (port->dev_num < IB_UMAD_MAX_PORTS)
		clear_bit(port->dev_num, dev_map);
	else
		clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
}
static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	kref_init(&umad_dev->ref);

	umad_dev->start_port = s;
	umad_dev->end_port   = e;

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
			goto err;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s)
		ib_umad_kill_port(&umad_dev->port[i - s]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}
static void ib_umad_remove_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
		ib_umad_kill_port(&umad_dev->port[i]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}
static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register device number\n");
		goto out;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	ret = class_create_file(umad_class, &class_attr_abi_version.attr);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}
static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
	if (overflow_maj)
		unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
}
module_init(ib_umad_init);
module_exit(ib_umad_cleanup);