/*
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>
#include <linux/smp_lock.h>
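/*
 * Per-open state for /dev/ipmiN: the IPMI user handle, the queue of
 * received messages (protected by recv_msg_lock), and the default retry
 * settings applied to sends that do not supply their own.
 */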
struct ipmi_file_private
{
	ipmi_user_t          user;
	spinlock_t           recv_msg_lock;
	struct list_head     recv_msgs;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t    wait;
	struct mutex         recv_mutex;
	int                  default_retries;
	unsigned int         default_retry_time_ms;
};
static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void                 *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int                      was_empty;
	unsigned long            flags;

	spin_lock_irqsave(&(priv->recv_msg_lock), flags);

	was_empty = list_empty(&(priv->recv_msgs));
	list_add_tail(&(msg->link), &(priv->recv_msgs));

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}

	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}
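/* poll() support: readable as soon as at least one message is queued. */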
static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	unsigned int             mask = 0;
	unsigned long            flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);

	if (!list_empty(&(priv->recv_msgs)))
		mask |= (POLLIN | POLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}
static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      result;

	lock_kernel(); /* could race against open() otherwise */
	result = fasync_helper(fd, file, on, &priv->fasync_queue);
	unlock_kernel();

	return (result);
}
static struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};
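/*
 * open() of /dev/ipmiN: the minor number selects the interface; allocate
 * the per-file state and register it as an IPMI user.
 */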
static int ipmi_open(struct inode *inode, struct file *file)
{
	int                      if_num = iminor(inode);
	int                      rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &(priv->user));
	if (rv) {
		kfree(priv);
		return rv;
	}

	file->private_data = priv;

	spin_lock_init(&(priv->recv_msg_lock));
	INIT_LIST_HEAD(&(priv->recv_msgs));
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	mutex_init(&priv->recv_mutex);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

	return 0;
}
static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      rv;

	rv = ipmi_destroy_user(priv->user);
	if (rv)
		return rv;

	/* FIXME - free the messages in the list. */
	kfree(priv);

	return 0;
}
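/*
 * Common send path for the native and compat SEND_COMMAND ioctls: copy
 * the destination address and message from user space, validate them,
 * and pass the request to the message handler with the given retry
 * policy.
 */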
static int handle_send_req(ipmi_user_t     user,
			   struct ipmi_req *req,
			   int             retries,
			   unsigned int    retry_time_ms)
{
	int                    rv;
	struct ipmi_addr       addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/* From here out we cannot return, we must jump to "out" for
	   error exits to free msgdata. */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len)) {
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
 out:
	kfree(msg.data);
	return rv;
}
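/*
 * For reference, an illustrative (untested) user-space sketch of the
 * ioctl interface implemented below.  The device path, msgid, and the
 * choice of Get Device ID (netfn 0x06, cmd 0x01) are example values,
 * not part of this driver; the structures and the ioctl number come
 * from <linux/ipmi.h>.  The response would later be collected with
 * IPMICTL_RECEIVE_MSG once poll() reports the fd readable.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ipmi.h>
 *
 *	int send_get_device_id(void)
 *	{
 *		struct ipmi_system_interface_addr addr;
 *		struct ipmi_req req;
 *		int fd = open("/dev/ipmi0", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *		addr.channel = IPMI_BMC_CHANNEL;
 *
 *		memset(&req, 0, sizeof(req));
 *		req.addr = (unsigned char *) &addr;
 *		req.addr_len = sizeof(addr);
 *		req.msgid = 1;
 *		req.msg.netfn = 0x06;
 *		req.msg.cmd = 0x01;
 *		req.msg.data = NULL;
 *		req.msg.data_len = 0;
 *
 *		return ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *	}
 */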
static int ipmi_ioctl(struct inode  *inode,
		      struct file   *file,
		      unsigned int  cmd,
		      unsigned long data)
{
	int                      rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user              *arg = (void __user *)data;

	switch (cmd) {
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req,
				     priv->default_retries,
				     priv->default_retry_time_ms);
		break;
	}
	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}
	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv     rsp;
		int                  addr_len;
		struct list_head     *entry;
		struct ipmi_recv_msg *msg;
		unsigned long        flags;

		rv = 0;
		if (copy_from_user(&rsp, arg, sizeof(rsp))) {
			rv = -EFAULT;
			break;
		}

		/* We claim a mutex because we don't want two
		   users getting something from the queue at a time.
		   Since we have to release the spinlock before we can
		   copy the data to the user, it's possible another
		   user will grab something from the queue, too.  Then
		   the messages might get out of order if something
		   fails and the message gets put back onto the
		   queue.  This mutex prevents that problem. */
		mutex_lock(&priv->recv_mutex);

		/* Grab the message off the list. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		if (list_empty(&(priv->recv_msgs))) {
			spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
			rv = -EAGAIN;
			goto recv_err;
		}
		entry = priv->recv_msgs.next;
		msg = list_entry(entry, struct ipmi_recv_msg, link);
		list_del(entry);
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

		addr_len = ipmi_addr_length(msg->addr.addr_type);
		if (rsp.addr_len < addr_len) {
			rv = -EINVAL;
			goto recv_putback_on_err;
		}

		if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp.addr_len = addr_len;

		rsp.recv_type = msg->recv_type;
		rsp.msgid = msg->msgid;
		rsp.msg.netfn = msg->msg.netfn;
		rsp.msg.cmd = msg->msg.cmd;

		if (msg->msg.data_len > 0) {
			if (rsp.msg.data_len < msg->msg.data_len) {
				rv = -EMSGSIZE;
				if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
					msg->msg.data_len = rsp.msg.data_len;
				} else {
					goto recv_putback_on_err;
				}
			}

			if (copy_to_user(rsp.msg.data,
					 msg->msg.data,
					 msg->msg.data_len)) {
				rv = -EFAULT;
				goto recv_putback_on_err;
			}
			rsp.msg.data_len = msg->msg.data_len;
		} else {
			rsp.msg.data_len = 0;
		}

		if (copy_to_user(arg, &rsp, sizeof(rsp))) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}

		mutex_unlock(&priv->recv_mutex);
		ipmi_free_recv_msg(msg);
		break;

	recv_putback_on_err:
		/* If we got an error, put the message back onto
		   the head of the queue. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		list_add(entry, &(priv->recv_msgs));
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
		mutex_unlock(&priv->recv_mutex);
		break;

	recv_err:
		mutex_unlock(&priv->recv_mutex);
		break;
	}
	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   val.chans);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     val.chans);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}
	/* The next four are legacy, not per-channel. */
	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_address(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}
	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		return ipmi_set_my_address(priv->user, val.channel, val.value);
	}

	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}
	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}

	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		mode = ipmi_get_maintenance_mode(priv->user);
		if (copy_to_user(arg, &mode, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		if (copy_from_user(&mode, arg, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = ipmi_set_maintenance_mode(priv->user, mode);
		break;
	}
	}

	return rv;
}
#ifdef CONFIG_COMPAT

/*
 * The following code supports 32-bit compatible ioctls on 64-bit
 * kernels.  This allows running 32-bit apps on a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
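/*
 * 32-bit layouts of the user-visible structures.  Pointer-sized fields
 * shrink to compat_uptr_t, which is why the helpers below convert them
 * with compat_ptr() before reusing the native ioctl paths.
 */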
struct compat_ipmi_msg {
	u8		netfn;
	u8		cmd;
	u16		data_len;
	compat_uptr_t	data;
};

struct compat_ipmi_req {
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_recv {
	compat_int_t		recv_type;
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_req_settime {
	struct compat_ipmi_req	req;
	compat_int_t		retries;
	compat_uint_t		retry_time_ms;
};
/*
 * Define some helper functions for copying IPMI data
 */
static long get_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
	    __get_user(p64->netfn, &p32->netfn) ||
	    __get_user(p64->cmd, &p32->cmd) ||
	    __get_user(p64->data_len, &p32->data_len) ||
	    __get_user(tmp, &p32->data))
		return -EFAULT;
	p64->data = compat_ptr(tmp);
	return 0;
}
static long put_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg __user *p32)
{
	if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
	    __put_user(p64->netfn, &p32->netfn) ||
	    __put_user(p64->cmd, &p32->cmd) ||
	    __put_user(p64->data_len, &p32->data_len))
		return -EFAULT;
	return 0;
}
static long get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
	    __get_user(tmp, &p32->addr) ||
	    __get_user(p64->addr_len, &p32->addr_len) ||
	    __get_user(p64->msgid, &p32->msgid) ||
	    get_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	p64->addr = compat_ptr(tmp);
	return 0;
}
static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
					struct compat_ipmi_req_settime __user *p32)
{
	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
	    get_compat_ipmi_req(&p64->req, &p32->req) ||
	    __get_user(p64->retries, &p32->retries) ||
	    __get_user(p64->retry_time_ms, &p32->retry_time_ms))
		return -EFAULT;
	return 0;
}
static long get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
	    __get_user(p64->recv_type, &p32->recv_type) ||
	    __get_user(tmp, &p32->addr) ||
	    __get_user(p64->addr_len, &p32->addr_len) ||
	    __get_user(p64->msgid, &p32->msgid) ||
	    get_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	p64->addr = compat_ptr(tmp);
	return 0;
}
static long put_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv __user *p32)
{
	if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
	    __put_user(p64->recv_type, &p32->recv_type) ||
	    __put_user(p64->addr_len, &p32->addr_len) ||
	    __put_user(p64->msgid, &p32->msgid) ||
	    put_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	return 0;
}
/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	int rc;
	struct ipmi_file_private *priv = filep->private_data;

	switch (cmd) {
	case COMPAT_IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req rp;

		if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
			return -EFAULT;

		return handle_send_req(priv->user, &rp,
				       priv->default_retries,
				       priv->default_retry_time_ms);
	}
	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime sp;

		if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
			return -EFAULT;

		return handle_send_req(priv->user, &sp.req,
				       sp.retries, sp.retry_time_ms);
	}
	case COMPAT_IPMICTL_RECEIVE_MSG:
	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv __user *precv64;
		struct ipmi_recv        recv64;

		if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
			return -EFAULT;

		precv64 = compat_alloc_user_space(sizeof(recv64));
		if (copy_to_user(precv64, &recv64, sizeof(recv64)))
			return -EFAULT;

		rc = ipmi_ioctl(filep->f_path.dentry->d_inode, filep,
				((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
				 ? IPMICTL_RECEIVE_MSG
				 : IPMICTL_RECEIVE_MSG_TRUNC),
				(unsigned long) precv64);
		if (rc != 0)
			return rc;

		if (copy_from_user(&recv64, precv64, sizeof(recv64)))
			return -EFAULT;

		if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
			return -EFAULT;

		return rc;
	}
	default:
		return ipmi_ioctl(filep->f_path.dentry->d_inode, filep, cmd, arg);
	}
}
#endif /* CONFIG_COMPAT */
static const struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= ipmi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_ipmi_ioctl,
#endif
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
};
#define DEVICE_NAME "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
		 " default, or if you set it to zero, it will choose the next"
		 " available device.  Setting it to -1 will disable the"
		 " interface.  Other values will set the major device number"
		 " to that value.");
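/*
 * For example (the value here is arbitrary), a fixed major number can be
 * requested when the module is loaded:
 *
 *	modprobe ipmi_devintf ipmi_major=250
 */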
/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
	dev_t            dev;
	struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;
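/*
 * Watcher callbacks from the message handler: create or remove the
 * /dev/ipmiN device node as interfaces come and go, remembering each
 * registration on reg_list so cleanup_ipmi() can undo it.
 */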
static void ipmi_new_smi(int if_num, struct device *device)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		printk(KERN_ERR "ipmi_devintf: Unable to create the"
		       " ipmi class device link\n");
		return;
	}
	entry->dev = dev;

	mutex_lock(&reg_list_mutex);
	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
	list_add(&entry->link, &reg_list);
	mutex_unlock(&reg_list_mutex);
}
static void ipmi_smi_gone(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry(entry, &reg_list, link) {
		if (entry->dev == dev) {
			list_del(&entry->link);
			kfree(entry);
			break;
		}
	}
	device_destroy(ipmi_class, dev);
	mutex_unlock(&reg_list_mutex);
}
static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};
static __init int init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	printk(KERN_INFO "ipmi device interface\n");

	ipmi_class = class_create(THIS_MODULE, "ipmi");
	if (IS_ERR(ipmi_class)) {
		printk(KERN_ERR "ipmi: can't register device class\n");
		return PTR_ERR(ipmi_class);
	}

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_destroy(ipmi_class);
		printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0) {
		ipmi_major = rv;
	}

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_destroy(ipmi_class);
		printk(KERN_WARNING "ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);
static __exit void cleanup_ipmi(void)
{
	struct ipmi_reg_list *entry, *entry2;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
		list_del(&entry->link);
		device_destroy(ipmi_class, entry->dev);
		kfree(entry);
	}
	mutex_unlock(&reg_list_mutex);
	class_destroy(ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
MODULE_ALIAS("platform:ipmi_si");