/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: user_mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/kref.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};
/*
 * Our lifetime rules for these structs are the following: each time a
 * device special file is opened, we look up the corresponding struct
 * ib_umad_port by minor in the umad_port[] table while holding the
 * port_lock.  If this lookup succeeds, we take a reference on the
 * ib_umad_port's struct ib_umad_device while still holding the
 * port_lock; if the lookup fails, we fail the open().  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we clear all of its
 * ib_umad_ports from umad_port[] while holding port_lock before
 * dropping the module's reference to the ib_umad_device.  This is
 * always safe because any open() calls will either succeed and obtain
 * a reference before we clear the umad_port[] entries, or fail after
 * we clear the umad_port[] entries.
 */
struct ib_umad_port {
	struct cdev           *dev;
	struct class_device   *class_dev;

	struct cdev           *sm_dev;
	struct class_device   *sm_class_dev;
	struct semaphore       sm_sem;

	struct rw_semaphore    mutex;
	struct list_head       file_list;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	int                    dev_num;
	u8                     port_num;
};

struct ib_umad_device {
	int                  start_port, end_port;
	struct kref          ref;
	struct ib_umad_port  port[0];
};
struct ib_umad_file {
	struct ib_umad_port    *port;
	struct list_head	recv_list;
	struct list_head	port_list;
	spinlock_t		recv_lock;
	wait_queue_head_t	recv_wait;
	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
	int			agents_dead;
};
struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct ib_mad_recv_wc  *recv_wc;
	struct list_head	list;
	int			length;
	struct ib_user_mad	mad;
};
static struct class *umad_class;

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
static void ib_umad_release_dev(struct kref *ref)
{
	struct ib_umad_device *dev =
		container_of(ref, struct ib_umad_device, ref);

	kfree(dev);
}
/* caller must hold port->mutex at least for reading */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}
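/*
 * Queue a packet on the file's receive list so that a subsequent read()
 * can return it.  The matching agent's slot number is recorded in
 * mad.hdr.id for userspace.  Returns 0 on success, non-zero if the agent
 * no longer belongs to this file (the caller then frees the packet).
 */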
static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	down_read(&file->port->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			spin_lock_irq(&file->recv_lock);
			list_add_tail(&packet->list, &file->recv_list);
			spin_unlock_irq(&file->recv_lock);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	up_read(&file->port->mutex);

	return ret;
}
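/*
 * Return the length of the class-specific MAD header, i.e. the offset at
 * which the data payload of each RMPP segment starts.
 */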
static int data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_RMPP_HDR;
}
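/*
 * Send completion handler: release the address handle and send buffer.
 * If the send timed out waiting for a response, queue a stub packet with
 * status ETIMEDOUT back to the file so userspace sees the timeout.
 */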
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;
	}
	kfree(packet);
}
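/*
 * Receive handler: wrap the received MAD in an ib_umad_packet, fill in
 * the ib_user_mad header from the work completion, and queue it for
 * read().  The ib_mad_recv_wc is kept until the packet is consumed.
 */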
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	packet->length = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status	  = 0;
	packet->mad.hdr.length	  = sizeof (struct ib_user_mad) +
				    mad_recv_wc->mad_len;
	packet->mad.hdr.qpn	  = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid	  = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl	  = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		/* XXX parse GRH */
		packet->mad.hdr.gid_index	= 0;
		packet->mad.hdr.hop_limit	= 0;
		packet->mad.hdr.traffic_class	= 0;
		memset(packet->mad.hdr.gid, 0, 16);
		packet->mad.hdr.flow_label	= 0;
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}
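/*
 * Copy a received MAD to the user buffer.  For multi-segment RMPP
 * messages, each segment after the first contributes only the payload
 * that follows the class-specific header, so segments are copied one at
 * a time.
 */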
static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
			     size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;

	/* We need enough room to copy the first (or only) MAD segment. */
	recv_buf = &packet->recv_wc->recv_buf;
	if ((packet->length <= sizeof (*recv_buf->mad) &&
	     count < sizeof (packet->mad) + packet->length) ||
	    (packet->length > sizeof (*recv_buf->mad) &&
	     count < sizeof (packet->mad) + sizeof (*recv_buf->mad)))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, sizeof (packet->mad)))
		return -EFAULT;

	buf += sizeof (packet->mad);
	seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message. Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < sizeof (packet->mad) + packet->length) {
			/*
			 * The buffer is too small, return the first RMPP segment,
			 * which includes the RMPP message length.
			 */
			return -ENOSPC;
		}
		offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = sizeof (struct ib_mad) - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
					 seg_payload))
				return -EFAULT;
		}
	}
	return sizeof (packet->mad) + packet->length;
}
static ssize_t copy_send_mad(char __user *buf, struct ib_umad_packet *packet,
			     size_t count)
{
	ssize_t size = sizeof (packet->mad) + packet->length;

	if (count < size)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, size))
		return -EFAULT;

	return size;
}
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < sizeof (struct ib_user_mad))
		return -EINVAL;

	spin_lock_irq(&file->recv_lock);

	while (list_empty(&file->recv_list)) {
		spin_unlock_irq(&file->recv_lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		spin_lock_irq(&file->recv_lock);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	spin_unlock_irq(&file->recv_lock);

	if (packet->recv_wc)
		ret = copy_recv_mad(buf, packet, count);
	else
		ret = copy_send_mad(buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		spin_lock_irq(&file->recv_lock);
		list_add(&packet->list, &file->recv_list);
		spin_unlock_irq(&file->recv_lock);
	} else {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}
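/*
 * Copy the class-specific header and the data segments of an outgoing
 * RMPP MAD from the user buffer into the send buffer allocated by
 * ib_create_send_mad().
 */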
static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int left, seg;

	/* Copy class specific header */
	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place.  Copy data segments. */
	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
			return -EFAULT;
	}
	return 0;
}
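/*
 * write() sends a single MAD: build an address handle from the
 * ib_user_mad header, allocate a send buffer, copy in the payload
 * (segment by segment if RMPP is active), fix up the high-order bits of
 * the transaction ID for requests, and post the send.
 */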
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	u8 method;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;

	if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf,
			   sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id < 0 ||
	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	down_read(&file->port->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.flow_label	  = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit	  = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
		hdr_len = IB_MGMT_SA_HDR;
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	} else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
		   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
		hdr_len = IB_MGMT_VENDOR_HDR;
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	} else {
		hdr_len = IB_MGMT_MAD_HDR;
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	}

	data_len = count - sizeof (struct ib_user_mad) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 0, rmpp_active, hdr_len,
					 data_len, GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah		= ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries	= packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);
	buf += sizeof (struct ib_user_mad);

	if (!rmpp_active) {
		if (copy_from_user(packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * If userspace is generating a request that will generate a
	 * response, we need to make sure the high-order part of the
	 * transaction ID matches the agent being used to send the
	 * MAD.
	 */
	method = ((struct ib_mad_hdr *) packet->msg->mad)->method;

	if (!(method & IB_MGMT_METHOD_RESP)       &&
	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
	    method != IB_MGMT_METHOD_SEND) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_msg;

	up_read(&file->port->mutex);

	return count;

err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	ib_destroy_ah(ah);
err_up:
	up_read(&file->port->mutex);
err:
	kfree(packet);
	return ret;
}
static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
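/*
 * IB_USER_MAD_REGISTER_AGENT: register a MAD agent on this port for the
 * class described by the ib_user_mad_reg_req at 'arg' and write the
 * chosen agent id back to userspace.
 */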
static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent;
	int agent_id;
	int ret;

	down_write(&file->port->mutex);

	if (!file->port->ib_dev) {
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask);
		memcpy(req.oui,         ureq.oui,         sizeof req.oui);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		ib_unregister_mad_agent(agent);
		goto out;
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	up_write(&file->port->mutex);
	return ret;
}
static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, (u32 __user *) arg))
		return -EFAULT;

	down_write(&file->port->mutex);

	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	up_write(&file->port->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	return ret;
}
static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, arg);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, arg);
	default:
		return -ENOIOCTLCMD;
	}
}
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = 0;

	spin_lock(&port_lock);
	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
	if (port)
		kref_get(&port->umad_dev->ref);
	spin_unlock(&port_lock);

	if (!port)
		return -ENXIO;

	down_write(&port->mutex);

	if (!port->ib_dev) {
		ret = -ENXIO;
		goto out;
	}

	file = kzalloc(sizeof *file, GFP_KERNEL);
	if (!file) {
		kref_put(&port->umad_dev->ref, ib_umad_release_dev);
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&file->recv_lock);
	INIT_LIST_HEAD(&file->recv_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

out:
	up_write(&port->mutex);
	return ret;
}
static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_device *dev = file->port->umad_dev;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	down_write(&file->port->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}

	list_del(&file->port_list);

	downgrade_write(&file->port->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	up_read(&file->port->mutex);

	kfree(file);
	kref_put(&dev->ref, ib_umad_release_dev);

	return 0;
}
static struct file_operations umad_fops = {
	.owner		= THIS_MODULE,
	.read		= ib_umad_read,
	.write		= ib_umad_write,
	.poll		= ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
	.compat_ioctl	= ib_umad_ioctl,
	.open		= ib_umad_open,
	.release	= ib_umad_close
};
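/*
 * The issm%d device: opening it sets the IB_PORT_SM capability bit on
 * the port (only one opener at a time, serialized by sm_sem); closing
 * it clears the bit again.
 */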
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	spin_lock(&port_lock);
	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS];
	if (port)
		kref_get(&port->umad_dev->ref);
	spin_unlock(&port_lock);

	if (!port)
		return -ENXIO;

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret) {
		up(&port->sm_sem);
		goto fail;
	}

	filp->private_data = port;

	return 0;

fail:
	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
	return ret;
}
static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	down_write(&port->mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	up_write(&port->mutex);

	up(&port->sm_sem);

	kref_put(&port->umad_dev->ref, ib_umad_release_dev);

	return ret;
}
static struct file_operations umad_sm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_umad_sm_open,
	.release = ib_umad_sm_close
};
static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};
static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static ssize_t show_abi_version(struct class *class, char *buf)
{
	return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
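/*
 * Create the umad%d and issm%d character devices and class devices for
 * one port, then publish the port in umad_port[] so open() can find it.
 */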
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_port *port)
{
	spin_lock(&port_lock);
	port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (port->dev_num >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&port_lock);
		return -1;
	}
	set_bit(port->dev_num, dev_map);
	spin_unlock(&port_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	init_MUTEX(&port->sm_sem);
	init_rwsem(&port->mutex);
	INIT_LIST_HEAD(&port->file_list);

	port->dev = cdev_alloc();
	if (!port->dev)
		return -1;
	port->dev->owner = THIS_MODULE;
	port->dev->ops   = &umad_fops;
	kobject_set_name(&port->dev->kobj, "umad%d", port->dev_num);
	if (cdev_add(port->dev, base_dev + port->dev_num, 1))
		goto err_cdev;

	port->class_dev = class_device_create(umad_class, NULL, port->dev->dev,
					      device->dma_device,
					      "umad%d", port->dev_num);
	if (IS_ERR(port->class_dev))
		goto err_cdev;

	if (class_device_create_file(port->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(port->class_dev, &class_device_attr_port))
		goto err_class;

	port->sm_dev = cdev_alloc();
	if (!port->sm_dev)
		goto err_class;
	port->sm_dev->owner = THIS_MODULE;
	port->sm_dev->ops   = &umad_sm_fops;
	kobject_set_name(&port->sm_dev->kobj, "issm%d", port->dev_num);
	if (cdev_add(port->sm_dev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
		goto err_sm_cdev;

	port->sm_class_dev = class_device_create(umad_class, NULL, port->sm_dev->dev,
						 device->dma_device,
						 "issm%d", port->dev_num);
	if (IS_ERR(port->sm_class_dev))
		goto err_sm_cdev;

	class_set_devdata(port->class_dev,    port);
	class_set_devdata(port->sm_class_dev, port);

	if (class_device_create_file(port->sm_class_dev, &class_device_attr_ibdev))
		goto err_sm_class;
	if (class_device_create_file(port->sm_class_dev, &class_device_attr_port))
		goto err_sm_class;

	spin_lock(&port_lock);
	umad_port[port->dev_num] = port;
	spin_unlock(&port_lock);

	return 0;

err_sm_class:
	class_device_destroy(umad_class, port->sm_dev->dev);

err_sm_cdev:
	cdev_del(port->sm_dev);

err_class:
	class_device_destroy(umad_class, port->dev->dev);

err_cdev:
	cdev_del(port->dev);
	clear_bit(port->dev_num, dev_map);

	return -1;
}
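/*
 * Tear down a port's devices and detach any files that still have it
 * open; see the comment below about the locking dance with
 * ib_umad_close().
 */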
static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int id;

	class_set_devdata(port->class_dev,    NULL);
	class_set_devdata(port->sm_class_dev, NULL);

	class_device_destroy(umad_class, port->dev->dev);
	class_device_destroy(umad_class, port->sm_dev->dev);

	cdev_del(port->dev);
	cdev_del(port->sm_dev);

	spin_lock(&port_lock);
	umad_port[port->dev_num] = NULL;
	spin_unlock(&port_lock);

	down_write(&port->mutex);

	port->ib_dev = NULL;

	/*
	 * Now go through the list of files attached to this port and
	 * unregister all of their MAD agents.  We need to hold
	 * port->mutex while doing this to avoid racing with
	 * ib_umad_close(), but we can't hold the mutex for writing
	 * while calling ib_unregister_mad_agent(), since that might
	 * deadlock by calling back into queue_packet().  So we
	 * downgrade our lock to a read lock, and then drop and
	 * reacquire the write lock for the next iteration.
	 *
	 * We do list_del_init() on the file's list_head so that the
	 * list_del in ib_umad_close() is still OK, even after the
	 * file is removed from the list.
	 */
	while (!list_empty(&port->file_list)) {
		file = list_entry(port->file_list.next, struct ib_umad_file,
				  port_list);

		file->agents_dead = 1;
		list_del_init(&file->port_list);

		downgrade_write(&port->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);

		up_read(&port->mutex);
		down_write(&port->mutex);
	}

	up_write(&port->mutex);

	clear_bit(port->dev_num, dev_map);
}
static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;

	if (device->node_type == IB_NODE_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	kref_init(&umad_dev->ref);

	umad_dev->start_port = s;
	umad_dev->end_port   = e;

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
			goto err;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s)
		ib_umad_kill_port(&umad_dev->port[i - s]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}
static void ib_umad_remove_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
		ib_umad_kill_port(&umad_dev->port[i]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}
static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register device number\n");
		goto out;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	ret = class_create_file(umad_class, &class_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}
static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
}
module_init(ib_umad_init);
module_exit(ib_umad_cleanup);