/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: user_mad.c 2814 2005-07-06 19:14:09Z halr $
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/kref.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
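
/*
 * This module exposes MAD (management datagram) send/receive to
 * userspace: each IB port gets a umad%d character device for general
 * MAD traffic and an issm%d device whose open/close toggles the
 * port's IB_PORT_SM capability bit.
 */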
enum {
        IB_UMAD_MAX_PORTS  = 64,
        IB_UMAD_MAX_AGENTS = 32,

        IB_UMAD_MAJOR      = 231,
        IB_UMAD_MINOR_BASE = 0
};
struct ib_umad_port {
        int                    devnum;
        struct cdev            dev;
        struct class_device    class_dev;

        int                    sm_devnum;
        struct cdev            sm_dev;
        struct class_device    sm_class_dev;
        struct semaphore       sm_sem;

        struct ib_device      *ib_dev;
        struct ib_umad_device *umad_dev;
        int                    port_num;
};
struct ib_umad_device {
        int                  start_port, end_port;
        struct kref          ref;
        struct ib_umad_port  port[0];
};
struct ib_umad_file {
        struct ib_umad_port *port;
        spinlock_t           recv_lock;
        struct list_head     recv_list;
        wait_queue_head_t    recv_wait;
        struct rw_semaphore  agent_mutex;
        struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
        struct ib_mr        *mr[IB_UMAD_MAX_AGENTS];
};
struct ib_umad_packet {
        struct ib_ah           *ah;
        struct ib_mad_send_buf *msg;
        struct list_head        list;
        int                     length;
        DECLARE_PCI_UNMAP_ADDR(mapping)
        struct ib_user_mad      mad;
};
static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static spinlock_t map_lock;
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);
static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
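
/*
 * Queue a packet on a file's receive list and wake up any reader,
 * but only while @agent is still registered with @file.  Returns 0
 * if the packet was queued and nonzero if no matching agent slot was
 * found, in which case the caller keeps ownership of the packet.
 */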
static int queue_packet(struct ib_umad_file *file,
                        struct ib_mad_agent *agent,
                        struct ib_umad_packet *packet)
{
        int ret = 1;

        down_read(&file->agent_mutex);
        for (packet->mad.hdr.id = 0;
             packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
             packet->mad.hdr.id++)
                if (agent == file->agent[packet->mad.hdr.id]) {
                        spin_lock_irq(&file->recv_lock);
                        list_add_tail(&packet->list, &file->recv_list);
                        spin_unlock_irq(&file->recv_lock);
                        wake_up_interruptible(&file->recv_wait);
                        ret = 0;
                        break;
                }

        up_read(&file->agent_mutex);

        return ret;
}
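
/*
 * Send completion handler: the AH and send buffer are always released
 * here.  If the send timed out waiting for a response, a minimal
 * packet (MAD header only, status ETIMEDOUT) is synthesized and
 * queued so userspace can learn about the timeout from read().
 */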
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *send_wc)
{
        struct ib_umad_file *file = agent->context;
        struct ib_umad_packet *timeout, *packet =
                (void *) (unsigned long) send_wc->wr_id;

        ib_destroy_ah(packet->msg->send_wr.wr.ud.ah);
        ib_free_send_mad(packet->msg);

        if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
                timeout = kmalloc(sizeof *timeout + sizeof (struct ib_mad_hdr),
                                  GFP_KERNEL);
                if (!timeout)
                        goto out;

                memset(timeout, 0, sizeof *timeout + sizeof (struct ib_mad_hdr));

                timeout->length         = sizeof (struct ib_mad_hdr);
                timeout->mad.hdr.id     = packet->mad.hdr.id;
                timeout->mad.hdr.status = ETIMEDOUT;
                memcpy(timeout->mad.data, packet->mad.data,
                       sizeof (struct ib_mad_hdr));

                /* If no agent matches, we still own timeout and must free it */
                if (queue_packet(file, agent, timeout))
                        kfree(timeout);
        }
out:
        kfree(packet);
}
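
/*
 * Receive completion handler: copy the coalesced MAD and its wire
 * addressing information (source QP/LID/SL, GRH flag) into a
 * struct ib_user_mad and queue it for read().  The GRH fields are
 * currently zeroed rather than parsed.
 */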
static void recv_handler(struct ib_mad_agent *agent,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_umad_file *file = agent->context;
        struct ib_umad_packet *packet;
        int length;

        if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
                goto out;

        length = mad_recv_wc->mad_len;
        packet = kmalloc(sizeof *packet + length, GFP_KERNEL);
        if (!packet)
                goto out;

        memset(packet, 0, sizeof *packet + length);
        packet->length = length;

        ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);

        packet->mad.hdr.status      = 0;
        packet->mad.hdr.length      = length + sizeof (struct ib_user_mad);
        packet->mad.hdr.qpn         = cpu_to_be32(mad_recv_wc->wc->src_qp);
        packet->mad.hdr.lid         = cpu_to_be16(mad_recv_wc->wc->slid);
        packet->mad.hdr.sl          = mad_recv_wc->wc->sl;
        packet->mad.hdr.path_bits   = mad_recv_wc->wc->dlid_path_bits;
        packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
        if (packet->mad.hdr.grh_present) {
                /* XXX parse GRH */
                packet->mad.hdr.gid_index     = 0;
                packet->mad.hdr.hop_limit     = 0;
                packet->mad.hdr.traffic_class = 0;
                memset(packet->mad.hdr.gid, 0, 16);
                packet->mad.hdr.flow_label    = 0;
        }

        if (queue_packet(file, agent, packet))
                kfree(packet);

out:
        ib_free_recv_mad(mad_recv_wc);
}
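
/*
 * read() returns exactly one queued MAD per call.  If the caller's
 * buffer is too small for the full packet, only the ib_user_mad
 * header plus the first MAD segment are copied and the packet is
 * requeued so a larger read can fetch it intact.
 */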
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
                            size_t count, loff_t *pos)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_umad_packet *packet;
        ssize_t ret;

        if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
                return -EINVAL;

        spin_lock_irq(&file->recv_lock);

        while (list_empty(&file->recv_list)) {
                spin_unlock_irq(&file->recv_lock);

                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->recv_wait,
                                             !list_empty(&file->recv_list)))
                        return -ERESTARTSYS;

                spin_lock_irq(&file->recv_lock);
        }

        packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
        list_del(&packet->list);

        spin_unlock_irq(&file->recv_lock);

        if (count < packet->length + sizeof (struct ib_user_mad)) {
                /* Return length needed (and first RMPP segment) if too small */
                if (copy_to_user(buf, &packet->mad,
                                 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
                        ret = -EFAULT;
                else
                        ret = -ENOSPC;
        } else if (copy_to_user(buf, &packet->mad,
                                packet->length + sizeof (struct ib_user_mad)))
                ret = -EFAULT;
        else
                ret = packet->length + sizeof (struct ib_user_mad);

        if (ret < 0) {
                /* Requeue packet */
                spin_lock_irq(&file->recv_lock);
                list_add(&packet->list, &file->recv_list);
                spin_unlock_irq(&file->recv_lock);
        } else
                kfree(packet);

        return ret;
}
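
/*
 * write() takes one struct ib_user_mad followed by the MAD payload,
 * builds an address handle from the header's routing fields, allocates
 * a send buffer via ib_create_send_mad() (honoring RMPP when the RMPP
 * active flag is set and the agent supports it), stamps the agent's
 * hi_tid into request transaction IDs, and posts the send.
 */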
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_umad_packet *packet;
        struct ib_mad_agent *agent;
        struct ib_ah_attr ah_attr;
        struct ib_send_wr *bad_wr;
        struct ib_rmpp_mad *rmpp_mad;
        u8 method;
        __be64 *tid;
        int ret, length, hdr_len, data_len, rmpp_hdr_size;
        int rmpp_active = 0;

        if (count < sizeof (struct ib_user_mad))
                return -EINVAL;

        length = count - sizeof (struct ib_user_mad);
        packet = kmalloc(sizeof *packet + sizeof (struct ib_mad_hdr) +
                         sizeof (struct ib_rmpp_hdr), GFP_KERNEL);
        if (!packet)
                return -ENOMEM;

        if (copy_from_user(&packet->mad, buf,
                           sizeof (struct ib_user_mad) +
                           sizeof (struct ib_mad_hdr) +
                           sizeof (struct ib_rmpp_hdr))) {
                ret = -EFAULT;
                goto err;
        }

        if (packet->mad.hdr.id < 0 ||
            packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
                ret = -EINVAL;
                goto err;
        }

        packet->length = length;

        down_read(&file->agent_mutex);

        agent = file->agent[packet->mad.hdr.id];
        if (!agent) {
                ret = -EINVAL;
                goto err_up;
        }

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
        ah_attr.sl            = packet->mad.hdr.sl;
        ah_attr.src_path_bits = packet->mad.hdr.path_bits;
        ah_attr.port_num      = file->port->port_num;
        if (packet->mad.hdr.grh_present) {
                ah_attr.ah_flags = IB_AH_GRH;
                memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
                ah_attr.grh.flow_label    = be32_to_cpu(packet->mad.hdr.flow_label);
                ah_attr.grh.hop_limit     = packet->mad.hdr.hop_limit;
                ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
        }

        packet->ah = ib_create_ah(agent->qp->pd, &ah_attr);
        if (IS_ERR(packet->ah)) {
                ret = PTR_ERR(packet->ah);
                goto err_up;
        }

        rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
        if (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) {
                /* RMPP active */
                if (!agent->rmpp_version) {
                        ret = -EINVAL;
                        goto err_ah;
                }

                /* Validate that management class can support RMPP */
                if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
                        hdr_len  = offsetof(struct ib_sa_mad, data);
                        data_len = length - hdr_len;
                } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
                           (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
                        hdr_len  = offsetof(struct ib_vendor_mad, data);
                        data_len = length - hdr_len;
                } else {
                        ret = -EINVAL;
                        goto err_ah;
                }
                rmpp_active = 1;
        } else {
                if (length > sizeof (struct ib_mad)) {
                        ret = -EINVAL;
                        goto err_ah;
                }
                hdr_len  = offsetof(struct ib_mad, data);
                data_len = length - hdr_len;
        }

        packet->msg = ib_create_send_mad(agent,
                                         be32_to_cpu(packet->mad.hdr.qpn),
                                         0, packet->ah, rmpp_active,
                                         hdr_len, data_len,
                                         GFP_KERNEL);
        if (IS_ERR(packet->msg)) {
                ret = PTR_ERR(packet->msg);
                goto err_ah;
        }

        packet->msg->send_wr.wr.ud.timeout_ms = packet->mad.hdr.timeout_ms;
        packet->msg->send_wr.wr.ud.retries    = packet->mad.hdr.retries;

        /* Override send WR WRID initialized in ib_create_send_mad */
        packet->msg->send_wr.wr_id = (unsigned long) packet;

        if (!rmpp_active) {
                /* Copy message from user into send buffer */
                if (copy_from_user(packet->msg->mad,
                                   buf + sizeof (struct ib_user_mad), length)) {
                        ret = -EFAULT;
                        goto err_msg;
                }
        } else {
                rmpp_hdr_size = sizeof (struct ib_mad_hdr) +
                                sizeof (struct ib_rmpp_hdr);

                /* Only copy MAD headers (RMPP header in place) */
                memcpy(packet->msg->mad, packet->mad.data,
                       sizeof (struct ib_mad_hdr));

                /* Now, copy rest of message from user into send buffer */
                if (copy_from_user(((struct ib_rmpp_mad *) packet->msg->mad)->data,
                                   buf + sizeof (struct ib_user_mad) + rmpp_hdr_size,
                                   length - rmpp_hdr_size)) {
                        ret = -EFAULT;
                        goto err_msg;
                }
        }

        /*
         * If userspace is generating a request that will generate a
         * response, we need to make sure the high-order part of the
         * transaction ID matches the agent being used to send the
         * MAD.
         */
        method = packet->msg->mad->mad_hdr.method;

        if (!(method & IB_MGMT_METHOD_RESP)       &&
            method != IB_MGMT_METHOD_TRAP_REPRESS &&
            method != IB_MGMT_METHOD_SEND) {
                tid = &packet->msg->mad->mad_hdr.tid;
                *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
                                   (be64_to_cpup(tid) & 0xffffffff));
        }

        ret = ib_post_send_mad(agent, &packet->msg->send_wr, &bad_wr);
        if (ret)
                goto err_msg;

        up_read(&file->agent_mutex);

        return sizeof (struct ib_user_mad_hdr) + packet->length;

err_msg:
        ib_free_send_mad(packet->msg);

err_ah:
        ib_destroy_ah(packet->ah);

err_up:
        up_read(&file->agent_mutex);

err:
        kfree(packet);
        return ret;
}
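
/*
 * poll() reports the device writable at all times, since a MAD send
 * can always be posted, and readable once the receive list is
 * non-empty.
 */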
static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct ib_umad_file *file = filp->private_data;

        /* we will always be able to post a MAD send */
        unsigned int mask = POLLOUT | POLLWRNORM;

        poll_wait(filp, &file->recv_wait, wait);

        if (!list_empty(&file->recv_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}
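
/*
 * Register a MAD agent on behalf of userspace: find a free slot in
 * file->agent[], register with the MAD layer (QP0 or QP1 according
 * to ureq.qpn), grab a DMA MR for the agent's PD, and copy the slot
 * id back into the caller's ib_user_mad_reg_req.
 */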
static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
{
        struct ib_user_mad_reg_req ureq;
        struct ib_mad_reg_req req;
        struct ib_mad_agent *agent;
        int agent_id;
        int ret;

        down_write(&file->agent_mutex);

        if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
                ret = -EFAULT;
                goto out;
        }

        if (ureq.qpn != 0 && ureq.qpn != 1) {
                ret = -EINVAL;
                goto out;
        }

        for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
                if (!file->agent[agent_id])
                        goto found;

        ret = -ENOMEM;
        goto out;

found:
        if (ureq.mgmt_class) {
                req.mgmt_class         = ureq.mgmt_class;
                req.mgmt_class_version = ureq.mgmt_class_version;
                memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask);
                memcpy(req.oui,         ureq.oui,         sizeof req.oui);
        }

        agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
                                      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
                                      ureq.mgmt_class ? &req : NULL,
                                      ureq.rmpp_version,
                                      send_handler, recv_handler, file);
        if (IS_ERR(agent)) {
                ret = PTR_ERR(agent);
                goto out;
        }

        file->agent[agent_id] = agent;

        file->mr[agent_id] = ib_get_dma_mr(agent->qp->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(file->mr[agent_id])) {
                ret = -ENOMEM;
                goto err;
        }

        if (put_user(agent_id,
                     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
                ret = -EFAULT;
                goto err_mr;
        }

        ret = 0;
        goto out;

err_mr:
        ib_dereg_mr(file->mr[agent_id]);

err:
        file->agent[agent_id] = NULL;
        ib_unregister_mad_agent(agent);

out:
        up_write(&file->agent_mutex);
        return ret;
}
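
/*
 * Unregister a previously registered agent id: release its DMA MR,
 * drop the MAD layer registration, and free the slot.
 */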
static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
{
        int id;
        int ret = 0;

        down_write(&file->agent_mutex);

        if (get_user(id, (u32 __user *) arg)) {
                ret = -EFAULT;
                goto out;
        }

        if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !file->agent[id]) {
                ret = -EINVAL;
                goto out;
        }

        ib_dereg_mr(file->mr[id]);
        ib_unregister_mad_agent(file->agent[id]);
        file->agent[id] = NULL;

out:
        up_write(&file->agent_mutex);
        return ret;
}
static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
                          unsigned long arg)
{
        switch (cmd) {
        case IB_USER_MAD_REGISTER_AGENT:
                return ib_umad_reg_agent(filp->private_data, arg);
        case IB_USER_MAD_UNREGISTER_AGENT:
                return ib_umad_unreg_agent(filp->private_data, arg);
        default:
                return -ENOIOCTLCMD;
        }
}
static int ib_umad_open(struct inode *inode, struct file *filp)
{
        struct ib_umad_port *port =
                container_of(inode->i_cdev, struct ib_umad_port, dev);
        struct ib_umad_file *file;

        file = kmalloc(sizeof *file, GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        memset(file, 0, sizeof *file);

        spin_lock_init(&file->recv_lock);
        init_rwsem(&file->agent_mutex);
        INIT_LIST_HEAD(&file->recv_list);
        init_waitqueue_head(&file->recv_wait);

        file->port = port;
        filp->private_data = file;

        return 0;
}
static int ib_umad_close(struct inode *inode, struct file *filp)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_umad_packet *packet, *tmp;
        int i;

        for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
                if (file->agent[i]) {
                        ib_dereg_mr(file->mr[i]);
                        ib_unregister_mad_agent(file->agent[i]);
                }

        list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
                kfree(packet);

        kfree(file);

        return 0;
}
static struct file_operations umad_fops = {
        .owner          = THIS_MODULE,
        .read           = ib_umad_read,
        .write          = ib_umad_write,
        .poll           = ib_umad_poll,
        .unlocked_ioctl = ib_umad_ioctl,
        .compat_ioctl   = ib_umad_ioctl,
        .open           = ib_umad_open,
        .release        = ib_umad_close
};
*inode
, struct file
*filp
)
621 struct ib_umad_port
*port
=
622 container_of(inode
->i_cdev
, struct ib_umad_port
, sm_dev
);
623 struct ib_port_modify props
= {
624 .set_port_cap_mask
= IB_PORT_SM
628 if (filp
->f_flags
& O_NONBLOCK
) {
629 if (down_trylock(&port
->sm_sem
))
632 if (down_interruptible(&port
->sm_sem
))
636 ret
= ib_modify_port(port
->ib_dev
, port
->port_num
, 0, &props
);
642 filp
->private_data
= port
;
static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
        struct ib_umad_port *port = filp->private_data;
        struct ib_port_modify props = {
                .clr_port_cap_mask = IB_PORT_SM
        };
        int ret;

        ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
        up(&port->sm_sem);

        return ret;
}
static struct file_operations umad_sm_fops = {
        .owner   = THIS_MODULE,
        .open    = ib_umad_sm_open,
        .release = ib_umad_sm_close
};
static struct ib_client umad_client = {
        .name   = "umad",
        .add    = ib_umad_add_one,
        .remove = ib_umad_remove_one
};
static ssize_t show_dev(struct class_device *class_dev, char *buf)
{
        struct ib_umad_port *port = class_get_devdata(class_dev);

        if (class_dev == &port->class_dev)
                return print_dev_t(buf, port->dev.dev);
        else
                return print_dev_t(buf, port->sm_dev.dev);
}
static CLASS_DEVICE_ATTR(dev, S_IRUGO, show_dev, NULL);
static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
        struct ib_umad_port *port = class_get_devdata(class_dev);

        return sprintf(buf, "%s\n", port->ib_dev->name);
}
static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static ssize_t show_port(struct class_device *class_dev, char *buf)
{
        struct ib_umad_port *port = class_get_devdata(class_dev);

        return sprintf(buf, "%d\n", port->port_num);
}
static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static void ib_umad_release_dev(struct kref *ref)
{
        struct ib_umad_device *dev =
                container_of(ref, struct ib_umad_device, ref);

        kfree(dev);
}

static void ib_umad_release_port(struct class_device *class_dev)
{
        struct ib_umad_port *port = class_get_devdata(class_dev);

        if (class_dev == &port->class_dev) {
                cdev_del(&port->dev);
                clear_bit(port->devnum, dev_map);
        } else {
                cdev_del(&port->sm_dev);
                clear_bit(port->sm_devnum, dev_map);
        }

        kref_put(&port->umad_dev->ref, ib_umad_release_dev);
}
static struct class umad_class = {
        .name    = "infiniband_mad",
        .release = ib_umad_release_port
};
show_abi_version(struct class *class, char *buf
)
730 return sprintf(buf
, "%d\n", IB_USER_MAD_ABI_VERSION
);
732 static CLASS_ATTR(abi_version
, S_IRUGO
, show_abi_version
, NULL
);
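
/*
 * Set up the two character devices for one port: allocate minors from
 * dev_map (umad minors in the first IB_UMAD_MAX_PORTS, issm minors in
 * the second half), then register each cdev plus its class device and
 * sysfs attributes, unwinding everything on failure.
 */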
static int ib_umad_init_port(struct ib_device *device, int port_num,
                             struct ib_umad_port *port)
{
        spin_lock(&map_lock);
        port->devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
        if (port->devnum >= IB_UMAD_MAX_PORTS) {
                spin_unlock(&map_lock);
                return -1;
        }
        port->sm_devnum = find_next_zero_bit(dev_map, IB_UMAD_MAX_PORTS * 2, IB_UMAD_MAX_PORTS);
        if (port->sm_devnum >= IB_UMAD_MAX_PORTS * 2) {
                spin_unlock(&map_lock);
                return -1;
        }
        set_bit(port->devnum, dev_map);
        set_bit(port->sm_devnum, dev_map);
        spin_unlock(&map_lock);

        port->ib_dev   = device;
        port->port_num = port_num;
        init_MUTEX(&port->sm_sem);

        cdev_init(&port->dev, &umad_fops);
        port->dev.owner = THIS_MODULE;
        kobject_set_name(&port->dev.kobj, "umad%d", port->devnum);
        if (cdev_add(&port->dev, base_dev + port->devnum, 1))
                return -1;

        port->class_dev.class = &umad_class;
        port->class_dev.dev   = device->dma_device;

        snprintf(port->class_dev.class_id, BUS_ID_SIZE, "umad%d", port->devnum);

        if (class_device_register(&port->class_dev))
                goto err_cdev;

        class_set_devdata(&port->class_dev, port);
        kref_get(&port->umad_dev->ref);

        if (class_device_create_file(&port->class_dev, &class_device_attr_dev))
                goto err_class;
        if (class_device_create_file(&port->class_dev, &class_device_attr_ibdev))
                goto err_class;
        if (class_device_create_file(&port->class_dev, &class_device_attr_port))
                goto err_class;

        cdev_init(&port->sm_dev, &umad_sm_fops);
        port->sm_dev.owner = THIS_MODULE;
        kobject_set_name(&port->sm_dev.kobj, "issm%d", port->sm_devnum - IB_UMAD_MAX_PORTS);
        if (cdev_add(&port->sm_dev, base_dev + port->sm_devnum, 1))
                goto err_class;

        port->sm_class_dev.class = &umad_class;
        port->sm_class_dev.dev   = device->dma_device;

        snprintf(port->sm_class_dev.class_id, BUS_ID_SIZE, "issm%d", port->sm_devnum - IB_UMAD_MAX_PORTS);

        if (class_device_register(&port->sm_class_dev))
                goto err_sm_cdev;

        class_set_devdata(&port->sm_class_dev, port);
        kref_get(&port->umad_dev->ref);

        if (class_device_create_file(&port->sm_class_dev, &class_device_attr_dev))
                goto err_sm_class;
        if (class_device_create_file(&port->sm_class_dev, &class_device_attr_ibdev))
                goto err_sm_class;
        if (class_device_create_file(&port->sm_class_dev, &class_device_attr_port))
                goto err_sm_class;

        return 0;

err_sm_class:
        class_device_unregister(&port->sm_class_dev);

err_sm_cdev:
        cdev_del(&port->sm_dev);

err_class:
        class_device_unregister(&port->class_dev);

err_cdev:
        cdev_del(&port->dev);
        clear_bit(port->devnum, dev_map);

        return -1;
}
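
/*
 * Client hook called for each new IB device: switches expose only
 * port 0, HCAs ports 1..phys_port_cnt.  One ib_umad_port is set up
 * per port; on any failure the ports initialized so far are torn
 * down again.
 */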
static void ib_umad_add_one(struct ib_device *device)
{
        struct ib_umad_device *umad_dev;
        int s, e, i;

        if (device->node_type == IB_NODE_SWITCH)
                s = e = 0;
        else {
                s = 1;
                e = device->phys_port_cnt;
        }

        umad_dev = kmalloc(sizeof *umad_dev +
                           (e - s + 1) * sizeof (struct ib_umad_port),
                           GFP_KERNEL);
        if (!umad_dev)
                return;

        memset(umad_dev, 0, sizeof *umad_dev +
               (e - s + 1) * sizeof (struct ib_umad_port));

        kref_init(&umad_dev->ref);

        umad_dev->start_port = s;
        umad_dev->end_port   = e;

        for (i = s; i <= e; ++i) {
                umad_dev->port[i - s].umad_dev = umad_dev;

                if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
                        goto err;
        }

        ib_set_client_data(device, &umad_client, umad_dev);

        return;

err:
        while (--i >= s) {
                class_device_unregister(&umad_dev->port[i - s].class_dev);
                class_device_unregister(&umad_dev->port[i - s].sm_class_dev);
        }

        kref_put(&umad_dev->ref, ib_umad_release_dev);
}
static void ib_umad_remove_one(struct ib_device *device)
{
        struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
        int i;

        if (!umad_dev)
                return;

        for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) {
                class_device_unregister(&umad_dev->port[i].class_dev);
                class_device_unregister(&umad_dev->port[i].sm_class_dev);
        }

        kref_put(&umad_dev->ref, ib_umad_release_dev);
}
static int __init ib_umad_init(void)
{
        int ret;

        spin_lock_init(&map_lock);

        ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
                                     "infiniband_mad");
        if (ret) {
                printk(KERN_ERR "user_mad: couldn't register device number\n");
                goto out;
        }

        ret = class_register(&umad_class);
        if (ret) {
                printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
                goto out_chrdev;
        }

        ret = class_create_file(&umad_class, &class_attr_abi_version);
        if (ret) {
                printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
                goto out_class;
        }

        ret = ib_register_client(&umad_client);
        if (ret) {
                printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
                goto out_class;
        }

        return 0;

out_class:
        class_unregister(&umad_class);

out_chrdev:
        unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
        return ret;
}
static void __exit ib_umad_cleanup(void)
{
        ib_unregister_client(&umad_client);
        class_unregister(&umad_class);
        unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
}
module_init(ib_umad_init);
module_exit(ib_umad_cleanup);