1 // SPDX-License-Identifier: GPL-2.0+
5 * Linux device interface for the IPMI message handler.
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
11 * Copyright 2002 MontaVista Software Inc.
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/errno.h>
17 #include <linux/poll.h>
18 #include <linux/sched.h>
19 #include <linux/spinlock.h>
20 #include <linux/slab.h>
21 #include <linux/ipmi.h>
22 #include <linux/mutex.h>
23 #include <linux/init.h>
24 #include <linux/device.h>
25 #include <linux/compat.h>
27 struct ipmi_file_private
29 struct ipmi_user
*user
;
30 spinlock_t recv_msg_lock
;
31 struct list_head recv_msgs
;
33 struct fasync_struct
*fasync_queue
;
34 wait_queue_head_t wait
;
35 struct mutex recv_mutex
;
37 unsigned int default_retry_time_ms
;
40 static void file_receive_handler(struct ipmi_recv_msg
*msg
,
43 struct ipmi_file_private
*priv
= handler_data
;
47 spin_lock_irqsave(&priv
->recv_msg_lock
, flags
);
48 was_empty
= list_empty(&priv
->recv_msgs
);
49 list_add_tail(&msg
->link
, &priv
->recv_msgs
);
50 spin_unlock_irqrestore(&priv
->recv_msg_lock
, flags
);
53 wake_up_interruptible(&priv
->wait
);
54 kill_fasync(&priv
->fasync_queue
, SIGIO
, POLL_IN
);
58 static __poll_t
ipmi_poll(struct file
*file
, poll_table
*wait
)
60 struct ipmi_file_private
*priv
= file
->private_data
;
64 poll_wait(file
, &priv
->wait
, wait
);
66 spin_lock_irqsave(&priv
->recv_msg_lock
, flags
);
68 if (!list_empty(&priv
->recv_msgs
))
69 mask
|= (EPOLLIN
| EPOLLRDNORM
);
71 spin_unlock_irqrestore(&priv
->recv_msg_lock
, flags
);
76 static int ipmi_fasync(int fd
, struct file
*file
, int on
)
78 struct ipmi_file_private
*priv
= file
->private_data
;
80 return fasync_helper(fd
, file
, on
, &priv
->fasync_queue
);
/* Callbacks handed to ipmi_create_user(); only message receive is needed. */
static const struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};
88 static int ipmi_open(struct inode
*inode
, struct file
*file
)
90 int if_num
= iminor(inode
);
92 struct ipmi_file_private
*priv
;
94 priv
= kmalloc(sizeof(*priv
), GFP_KERNEL
);
100 rv
= ipmi_create_user(if_num
,
109 file
->private_data
= priv
;
111 spin_lock_init(&priv
->recv_msg_lock
);
112 INIT_LIST_HEAD(&priv
->recv_msgs
);
113 init_waitqueue_head(&priv
->wait
);
114 priv
->fasync_queue
= NULL
;
115 mutex_init(&priv
->recv_mutex
);
117 /* Use the low-level defaults. */
118 priv
->default_retries
= -1;
119 priv
->default_retry_time_ms
= 0;
125 static int ipmi_release(struct inode
*inode
, struct file
*file
)
127 struct ipmi_file_private
*priv
= file
->private_data
;
129 struct ipmi_recv_msg
*msg
, *next
;
131 rv
= ipmi_destroy_user(priv
->user
);
135 list_for_each_entry_safe(msg
, next
, &priv
->recv_msgs
, link
)
136 ipmi_free_recv_msg(msg
);
143 static int handle_send_req(struct ipmi_user
*user
,
144 struct ipmi_req
*req
,
146 unsigned int retry_time_ms
)
149 struct ipmi_addr addr
;
150 struct kernel_ipmi_msg msg
;
152 if (req
->addr_len
> sizeof(struct ipmi_addr
))
155 if (copy_from_user(&addr
, req
->addr
, req
->addr_len
))
158 msg
.netfn
= req
->msg
.netfn
;
159 msg
.cmd
= req
->msg
.cmd
;
160 msg
.data_len
= req
->msg
.data_len
;
161 msg
.data
= kmalloc(IPMI_MAX_MSG_LENGTH
, GFP_KERNEL
);
165 /* From here out we cannot return, we must jump to "out" for
166 error exits to free msgdata. */
168 rv
= ipmi_validate_addr(&addr
, req
->addr_len
);
172 if (req
->msg
.data
!= NULL
) {
173 if (req
->msg
.data_len
> IPMI_MAX_MSG_LENGTH
) {
178 if (copy_from_user(msg
.data
,
180 req
->msg
.data_len
)) {
188 rv
= ipmi_request_settime(user
,
201 static int handle_recv(struct ipmi_file_private
*priv
,
202 bool trunc
, struct ipmi_recv
*rsp
,
203 int (*copyout
)(struct ipmi_recv
*, void __user
*),
207 struct list_head
*entry
;
208 struct ipmi_recv_msg
*msg
;
212 /* We claim a mutex because we don't want two
213 users getting something from the queue at a time.
214 Since we have to release the spinlock before we can
215 copy the data to the user, it's possible another
216 user will grab something from the queue, too. Then
217 the messages might get out of order if something
218 fails and the message gets put back onto the
219 queue. This mutex prevents that problem. */
220 mutex_lock(&priv
->recv_mutex
);
222 /* Grab the message off the list. */
223 spin_lock_irqsave(&priv
->recv_msg_lock
, flags
);
224 if (list_empty(&(priv
->recv_msgs
))) {
225 spin_unlock_irqrestore(&priv
->recv_msg_lock
, flags
);
229 entry
= priv
->recv_msgs
.next
;
230 msg
= list_entry(entry
, struct ipmi_recv_msg
, link
);
232 spin_unlock_irqrestore(&priv
->recv_msg_lock
, flags
);
234 addr_len
= ipmi_addr_length(msg
->addr
.addr_type
);
235 if (rsp
->addr_len
< addr_len
) {
237 goto recv_putback_on_err
;
240 if (copy_to_user(rsp
->addr
, &msg
->addr
, addr_len
)) {
242 goto recv_putback_on_err
;
244 rsp
->addr_len
= addr_len
;
246 rsp
->recv_type
= msg
->recv_type
;
247 rsp
->msgid
= msg
->msgid
;
248 rsp
->msg
.netfn
= msg
->msg
.netfn
;
249 rsp
->msg
.cmd
= msg
->msg
.cmd
;
251 if (msg
->msg
.data_len
> 0) {
252 if (rsp
->msg
.data_len
< msg
->msg
.data_len
) {
255 msg
->msg
.data_len
= rsp
->msg
.data_len
;
257 goto recv_putback_on_err
;
260 if (copy_to_user(rsp
->msg
.data
,
262 msg
->msg
.data_len
)) {
264 goto recv_putback_on_err
;
266 rsp
->msg
.data_len
= msg
->msg
.data_len
;
268 rsp
->msg
.data_len
= 0;
271 rv
= copyout(rsp
, to
);
273 goto recv_putback_on_err
;
275 mutex_unlock(&priv
->recv_mutex
);
276 ipmi_free_recv_msg(msg
);
280 /* If we got an error, put the message back onto
281 the head of the queue. */
282 spin_lock_irqsave(&priv
->recv_msg_lock
, flags
);
283 list_add(entry
, &priv
->recv_msgs
);
284 spin_unlock_irqrestore(&priv
->recv_msg_lock
, flags
);
286 mutex_unlock(&priv
->recv_mutex
);
/* handle_recv() copyout callback for native-layout callers. */
static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
{
	return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
}
295 static long ipmi_ioctl(struct file
*file
,
300 struct ipmi_file_private
*priv
= file
->private_data
;
301 void __user
*arg
= (void __user
*)data
;
305 case IPMICTL_SEND_COMMAND
:
309 unsigned int retry_time_ms
;
311 if (copy_from_user(&req
, arg
, sizeof(req
))) {
316 mutex_lock(&priv
->recv_mutex
);
317 retries
= priv
->default_retries
;
318 retry_time_ms
= priv
->default_retry_time_ms
;
319 mutex_unlock(&priv
->recv_mutex
);
321 rv
= handle_send_req(priv
->user
, &req
, retries
, retry_time_ms
);
325 case IPMICTL_SEND_COMMAND_SETTIME
:
327 struct ipmi_req_settime req
;
329 if (copy_from_user(&req
, arg
, sizeof(req
))) {
334 rv
= handle_send_req(priv
->user
,
341 case IPMICTL_RECEIVE_MSG
:
342 case IPMICTL_RECEIVE_MSG_TRUNC
:
344 struct ipmi_recv rsp
;
346 if (copy_from_user(&rsp
, arg
, sizeof(rsp
)))
349 rv
= handle_recv(priv
, cmd
== IPMICTL_RECEIVE_MSG_TRUNC
,
350 &rsp
, copyout_recv
, arg
);
354 case IPMICTL_REGISTER_FOR_CMD
:
356 struct ipmi_cmdspec val
;
358 if (copy_from_user(&val
, arg
, sizeof(val
))) {
363 rv
= ipmi_register_for_cmd(priv
->user
, val
.netfn
, val
.cmd
,
368 case IPMICTL_UNREGISTER_FOR_CMD
:
370 struct ipmi_cmdspec val
;
372 if (copy_from_user(&val
, arg
, sizeof(val
))) {
377 rv
= ipmi_unregister_for_cmd(priv
->user
, val
.netfn
, val
.cmd
,
382 case IPMICTL_REGISTER_FOR_CMD_CHANS
:
384 struct ipmi_cmdspec_chans val
;
386 if (copy_from_user(&val
, arg
, sizeof(val
))) {
391 rv
= ipmi_register_for_cmd(priv
->user
, val
.netfn
, val
.cmd
,
396 case IPMICTL_UNREGISTER_FOR_CMD_CHANS
:
398 struct ipmi_cmdspec_chans val
;
400 if (copy_from_user(&val
, arg
, sizeof(val
))) {
405 rv
= ipmi_unregister_for_cmd(priv
->user
, val
.netfn
, val
.cmd
,
410 case IPMICTL_SET_GETS_EVENTS_CMD
:
414 if (copy_from_user(&val
, arg
, sizeof(val
))) {
419 rv
= ipmi_set_gets_events(priv
->user
, val
);
423 /* The next four are legacy, not per-channel. */
424 case IPMICTL_SET_MY_ADDRESS_CMD
:
428 if (copy_from_user(&val
, arg
, sizeof(val
))) {
433 rv
= ipmi_set_my_address(priv
->user
, 0, val
);
437 case IPMICTL_GET_MY_ADDRESS_CMD
:
442 rv
= ipmi_get_my_address(priv
->user
, 0, &rval
);
448 if (copy_to_user(arg
, &val
, sizeof(val
))) {
455 case IPMICTL_SET_MY_LUN_CMD
:
459 if (copy_from_user(&val
, arg
, sizeof(val
))) {
464 rv
= ipmi_set_my_LUN(priv
->user
, 0, val
);
468 case IPMICTL_GET_MY_LUN_CMD
:
473 rv
= ipmi_get_my_LUN(priv
->user
, 0, &rval
);
479 if (copy_to_user(arg
, &val
, sizeof(val
))) {
486 case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD
:
488 struct ipmi_channel_lun_address_set val
;
490 if (copy_from_user(&val
, arg
, sizeof(val
))) {
495 return ipmi_set_my_address(priv
->user
, val
.channel
, val
.value
);
499 case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD
:
501 struct ipmi_channel_lun_address_set val
;
503 if (copy_from_user(&val
, arg
, sizeof(val
))) {
508 rv
= ipmi_get_my_address(priv
->user
, val
.channel
, &val
.value
);
512 if (copy_to_user(arg
, &val
, sizeof(val
))) {
519 case IPMICTL_SET_MY_CHANNEL_LUN_CMD
:
521 struct ipmi_channel_lun_address_set val
;
523 if (copy_from_user(&val
, arg
, sizeof(val
))) {
528 rv
= ipmi_set_my_LUN(priv
->user
, val
.channel
, val
.value
);
532 case IPMICTL_GET_MY_CHANNEL_LUN_CMD
:
534 struct ipmi_channel_lun_address_set val
;
536 if (copy_from_user(&val
, arg
, sizeof(val
))) {
541 rv
= ipmi_get_my_LUN(priv
->user
, val
.channel
, &val
.value
);
545 if (copy_to_user(arg
, &val
, sizeof(val
))) {
552 case IPMICTL_SET_TIMING_PARMS_CMD
:
554 struct ipmi_timing_parms parms
;
556 if (copy_from_user(&parms
, arg
, sizeof(parms
))) {
561 mutex_lock(&priv
->recv_mutex
);
562 priv
->default_retries
= parms
.retries
;
563 priv
->default_retry_time_ms
= parms
.retry_time_ms
;
564 mutex_unlock(&priv
->recv_mutex
);
569 case IPMICTL_GET_TIMING_PARMS_CMD
:
571 struct ipmi_timing_parms parms
;
573 mutex_lock(&priv
->recv_mutex
);
574 parms
.retries
= priv
->default_retries
;
575 parms
.retry_time_ms
= priv
->default_retry_time_ms
;
576 mutex_unlock(&priv
->recv_mutex
);
578 if (copy_to_user(arg
, &parms
, sizeof(parms
))) {
587 case IPMICTL_GET_MAINTENANCE_MODE_CMD
:
591 mode
= ipmi_get_maintenance_mode(priv
->user
);
592 if (copy_to_user(arg
, &mode
, sizeof(mode
))) {
600 case IPMICTL_SET_MAINTENANCE_MODE_CMD
:
604 if (copy_from_user(&mode
, arg
, sizeof(mode
))) {
608 rv
= ipmi_set_maintenance_mode(priv
->user
, mode
);
/*
 * The following code contains code for supporting 32-bit compatible
 * ioctls on 64-bit kernels.  This allows running 32-bit apps on the
 * 64-bit kernel.
 */
/* Compat command numbers differ from the native ones because the
   argument structs have different sizes in a 32-bit ABI. */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
635 struct compat_ipmi_msg
{
642 struct compat_ipmi_req
{
644 compat_uint_t addr_len
;
646 struct compat_ipmi_msg msg
;
649 struct compat_ipmi_recv
{
650 compat_int_t recv_type
;
652 compat_uint_t addr_len
;
654 struct compat_ipmi_msg msg
;
657 struct compat_ipmi_req_settime
{
658 struct compat_ipmi_req req
;
659 compat_int_t retries
;
660 compat_uint_t retry_time_ms
;
664 * Define some helper functions for copying IPMI data
666 static void get_compat_ipmi_msg(struct ipmi_msg
*p64
,
667 struct compat_ipmi_msg
*p32
)
669 p64
->netfn
= p32
->netfn
;
671 p64
->data_len
= p32
->data_len
;
672 p64
->data
= compat_ptr(p32
->data
);
/* Widen a 32-bit ipmi_req into the native layout. */
static void get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req *p32)
{
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}
/* Widen a 32-bit ipmi_req_settime into the native layout. */
static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
					struct compat_ipmi_req_settime *p32)
{
	get_compat_ipmi_req(&p64->req, &p32->req);
	p64->retries = p32->retries;
	p64->retry_time_ms = p32->retry_time_ms;
}
/* Widen a 32-bit ipmi_recv into the native layout; zeroed first so any
   padding/unset fields are deterministic. */
static void get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv *p32)
{
	memset(p64, 0, sizeof(struct ipmi_recv));
	p64->recv_type = p32->recv_type;
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}
/* handle_recv() copyout callback for compat (32-bit) callers: narrow
   the native ipmi_recv into the compat layout before copying out.
   The memset keeps kernel stack padding from leaking to user space. */
static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
{
	struct compat_ipmi_recv v32;
	memset(&v32, 0, sizeof(struct compat_ipmi_recv));
	v32.recv_type = p64->recv_type;
	v32.addr = ptr_to_compat(p64->addr);
	v32.addr_len = p64->addr_len;
	v32.msgid = p64->msgid;
	v32.msg.netfn = p64->msg.netfn;
	v32.msg.cmd = p64->msg.cmd;
	v32.msg.data_len = p64->msg.data_len;
	v32.msg.data = ptr_to_compat(p64->msg.data);
	return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
}
719 * Handle compatibility ioctls
721 static long compat_ipmi_ioctl(struct file
*filep
, unsigned int cmd
,
724 struct ipmi_file_private
*priv
= filep
->private_data
;
727 case COMPAT_IPMICTL_SEND_COMMAND
:
730 struct compat_ipmi_req r32
;
732 unsigned int retry_time_ms
;
734 if (copy_from_user(&r32
, compat_ptr(arg
), sizeof(r32
)))
737 get_compat_ipmi_req(&rp
, &r32
);
739 mutex_lock(&priv
->recv_mutex
);
740 retries
= priv
->default_retries
;
741 retry_time_ms
= priv
->default_retry_time_ms
;
742 mutex_unlock(&priv
->recv_mutex
);
744 return handle_send_req(priv
->user
, &rp
,
745 retries
, retry_time_ms
);
747 case COMPAT_IPMICTL_SEND_COMMAND_SETTIME
:
749 struct ipmi_req_settime sp
;
750 struct compat_ipmi_req_settime sp32
;
752 if (copy_from_user(&sp32
, compat_ptr(arg
), sizeof(sp32
)))
755 get_compat_ipmi_req_settime(&sp
, &sp32
);
757 return handle_send_req(priv
->user
, &sp
.req
,
758 sp
.retries
, sp
.retry_time_ms
);
760 case COMPAT_IPMICTL_RECEIVE_MSG
:
761 case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC
:
763 struct ipmi_recv recv64
;
764 struct compat_ipmi_recv recv32
;
766 if (copy_from_user(&recv32
, compat_ptr(arg
), sizeof(recv32
)))
769 get_compat_ipmi_recv(&recv64
, &recv32
);
771 return handle_recv(priv
,
772 cmd
== COMPAT_IPMICTL_RECEIVE_MSG_TRUNC
,
773 &recv64
, copyout_recv32
, compat_ptr(arg
));
776 return ipmi_ioctl(filep
, cmd
, arg
);
781 static const struct file_operations ipmi_fops
= {
782 .owner
= THIS_MODULE
,
783 .unlocked_ioctl
= ipmi_ioctl
,
785 .compat_ioctl
= compat_ipmi_ioctl
,
788 .release
= ipmi_release
,
789 .fasync
= ipmi_fasync
,
791 .llseek
= noop_llseek
,
794 #define DEVICE_NAME "ipmidev"
796 static int ipmi_major
;
797 module_param(ipmi_major
, int, 0);
798 MODULE_PARM_DESC(ipmi_major
, "Sets the major number of the IPMI device. By"
799 " default, or if you set it to zero, it will choose the next"
800 " available device. Setting it to -1 will disable the"
801 " interface. Other values will set the major device number"
804 /* Keep track of the devices that are registered. */
805 struct ipmi_reg_list
{
807 struct list_head link
;
809 static LIST_HEAD(reg_list
);
810 static DEFINE_MUTEX(reg_list_mutex
);
812 static struct class *ipmi_class
;
814 static void ipmi_new_smi(int if_num
, struct device
*device
)
816 dev_t dev
= MKDEV(ipmi_major
, if_num
);
817 struct ipmi_reg_list
*entry
;
819 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
821 printk(KERN_ERR
"ipmi_devintf: Unable to create the"
822 " ipmi class device link\n");
827 mutex_lock(®_list_mutex
);
828 device_create(ipmi_class
, device
, dev
, NULL
, "ipmi%d", if_num
);
829 list_add(&entry
->link
, ®_list
);
830 mutex_unlock(®_list_mutex
);
833 static void ipmi_smi_gone(int if_num
)
835 dev_t dev
= MKDEV(ipmi_major
, if_num
);
836 struct ipmi_reg_list
*entry
;
838 mutex_lock(®_list_mutex
);
839 list_for_each_entry(entry
, ®_list
, link
) {
840 if (entry
->dev
== dev
) {
841 list_del(&entry
->link
);
846 device_destroy(ipmi_class
, dev
);
847 mutex_unlock(®_list_mutex
);
/* Registered at init so we hear about IPMI interfaces coming and going. */
static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};
857 static int __init
init_ipmi_devintf(void)
864 printk(KERN_INFO
"ipmi device interface\n");
866 ipmi_class
= class_create(THIS_MODULE
, "ipmi");
867 if (IS_ERR(ipmi_class
)) {
868 printk(KERN_ERR
"ipmi: can't register device class\n");
869 return PTR_ERR(ipmi_class
);
872 rv
= register_chrdev(ipmi_major
, DEVICE_NAME
, &ipmi_fops
);
874 class_destroy(ipmi_class
);
875 printk(KERN_ERR
"ipmi: can't get major %d\n", ipmi_major
);
879 if (ipmi_major
== 0) {
883 rv
= ipmi_smi_watcher_register(&smi_watcher
);
885 unregister_chrdev(ipmi_major
, DEVICE_NAME
);
886 class_destroy(ipmi_class
);
887 printk(KERN_WARNING
"ipmi: can't register smi watcher\n");
893 module_init(init_ipmi_devintf
);
895 static void __exit
cleanup_ipmi(void)
897 struct ipmi_reg_list
*entry
, *entry2
;
898 mutex_lock(®_list_mutex
);
899 list_for_each_entry_safe(entry
, entry2
, ®_list
, link
) {
900 list_del(&entry
->link
);
901 device_destroy(ipmi_class
, entry
->dev
);
904 mutex_unlock(®_list_mutex
);
905 class_destroy(ipmi_class
);
906 ipmi_smi_watcher_unregister(&smi_watcher
);
907 unregister_chrdev(ipmi_major
, DEVICE_NAME
);
909 module_exit(cleanup_ipmi
);
911 MODULE_LICENSE("GPL");
912 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
913 MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");