/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

struct ipmi_file_private
{
        ipmi_user_t          user;
        spinlock_t           recv_msg_lock;
        struct list_head     recv_msgs;
        struct file          *file;
        struct fasync_struct *fasync_queue;
        wait_queue_head_t    wait;
        struct mutex         recv_mutex;
        int                  default_retries;
        unsigned int         default_retry_time_ms;
};

static DEFINE_MUTEX(ipmi_mutex);

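/*
 * Receive callback registered with the IPMI message handler: queue the
 * message on the per-file list and, if the list was empty, wake up any
 * poll/select or SIGIO waiters.
 */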
static void file_receive_handler(struct ipmi_recv_msg *msg,
                                 void                 *handler_data)
{
        struct ipmi_file_private *priv = handler_data;
        int                      was_empty;
        unsigned long            flags;

        spin_lock_irqsave(&(priv->recv_msg_lock), flags);

        was_empty = list_empty(&(priv->recv_msgs));
        list_add_tail(&(msg->link), &(priv->recv_msgs));

        if (was_empty) {
                wake_up_interruptible(&priv->wait);
                kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
        }

        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}

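/* Report the device as readable when the receive queue is non-empty. */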
static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
        struct ipmi_file_private *priv = file->private_data;
        __poll_t                 mask = 0;
        unsigned long            flags;

        poll_wait(file, &priv->wait, wait);

        spin_lock_irqsave(&priv->recv_msg_lock, flags);

        if (!list_empty(&(priv->recv_msgs)))
                mask |= (EPOLLIN | EPOLLRDNORM);

        spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

        return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
        struct ipmi_file_private *priv = file->private_data;
        int                      result;

        mutex_lock(&ipmi_mutex); /* could race against open() otherwise */
        result = fasync_helper(fd, file, on, &priv->fasync_queue);
        mutex_unlock(&ipmi_mutex);

        return (result);
}

static const struct ipmi_user_hndl ipmi_hndlrs =
{
        .ipmi_recv_hndl = file_receive_handler,
};

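/*
 * Open the character device: allocate per-file state and register an
 * IPMI user with the interface selected by the minor number.
 */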
static int ipmi_open(struct inode *inode, struct file *file)
{
        int                      if_num = iminor(inode);
        int                      rv;
        struct ipmi_file_private *priv;

        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        mutex_lock(&ipmi_mutex);
        priv->file = file;

        rv = ipmi_create_user(if_num,
                              &ipmi_hndlrs,
                              priv,
                              &(priv->user));
        if (rv) {
                kfree(priv);
                goto out;
        }

        file->private_data = priv;

        spin_lock_init(&(priv->recv_msg_lock));
        INIT_LIST_HEAD(&(priv->recv_msgs));
        init_waitqueue_head(&priv->wait);
        priv->fasync_queue = NULL;
        mutex_init(&priv->recv_mutex);

        /* Use the low-level defaults. */
        priv->default_retries = -1;
        priv->default_retry_time_ms = 0;

out:
        mutex_unlock(&ipmi_mutex);
        return rv;
}

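/* Tear down the IPMI user and free any messages still queued for us. */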
static int ipmi_release(struct inode *inode, struct file *file)
{
        struct ipmi_file_private *priv = file->private_data;
        int                      rv;
        struct ipmi_recv_msg     *msg, *next;

        rv = ipmi_destroy_user(priv->user);
        if (rv)
                return rv;

        list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
                ipmi_free_recv_msg(msg);

        kfree(priv);

        return 0;
}

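/*
 * Copy a request from user space, validate the address and data length,
 * and hand the message to the IPMI core with the given retry settings.
 */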
static int handle_send_req(ipmi_user_t     user,
                           struct ipmi_req *req,
                           int             retries,
                           unsigned int    retry_time_ms)
{
        int              rv;
        struct ipmi_addr addr;
        struct kernel_ipmi_msg msg;

        if (req->addr_len > sizeof(struct ipmi_addr))
                return -EINVAL;

        if (copy_from_user(&addr, req->addr, req->addr_len))
                return -EFAULT;

        msg.netfn = req->msg.netfn;
        msg.cmd = req->msg.cmd;
        msg.data_len = req->msg.data_len;
        msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
        if (!msg.data)
                return -ENOMEM;

        /* From here out we cannot return, we must jump to "out" for
           error exits to free msg.data. */

        rv = ipmi_validate_addr(&addr, req->addr_len);
        if (rv)
                goto out;

        if (req->msg.data != NULL) {
                if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
                        rv = -EMSGSIZE;
                        goto out;
                }

                if (copy_from_user(msg.data,
                                   req->msg.data,
                                   req->msg.data_len)) {
                        rv = -EFAULT;
                        goto out;
                }
        } else {
                msg.data_len = 0;
        }

        rv = ipmi_request_settime(user,
                                  &addr,
                                  req->msgid,
                                  &msg,
                                  NULL,
                                  0,
                                  retries,
                                  retry_time_ms);
out:
        kfree(msg.data);
        return rv;
}

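/*
 * Pull the oldest message off the per-file receive queue and copy it to
 * user space via the supplied copyout helper.  On failure the message is
 * put back at the head of the queue; with "trunc" set, an oversized reply
 * is truncated to fit rather than rejected with -EMSGSIZE.
 */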
static int handle_recv(struct ipmi_file_private *priv,
                       bool trunc, struct ipmi_recv *rsp,
                       int (*copyout)(struct ipmi_recv *, void __user *),
                       void __user *to)
{
        int              addr_len;
        struct list_head *entry;
        struct ipmi_recv_msg *msg;
        unsigned long    flags;
        int rv = 0;

        /* We claim a mutex because we don't want two
           users getting something from the queue at a time.
           Since we have to release the spinlock before we can
           copy the data to the user, it's possible another
           user will grab something from the queue, too.  Then
           the messages might get out of order if something
           fails and the message gets put back onto the
           queue.  This mutex prevents that problem. */
        mutex_lock(&priv->recv_mutex);

        /* Grab the message off the list. */
        spin_lock_irqsave(&(priv->recv_msg_lock), flags);
        if (list_empty(&(priv->recv_msgs))) {
                spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
                rv = -EAGAIN;
                goto recv_err;
        }
        entry = priv->recv_msgs.next;
        msg = list_entry(entry, struct ipmi_recv_msg, link);
        list_del(entry);
        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

        addr_len = ipmi_addr_length(msg->addr.addr_type);
        if (rsp->addr_len < addr_len) {
                rv = -EINVAL;
                goto recv_putback_on_err;
        }

        if (copy_to_user(rsp->addr, &(msg->addr), addr_len)) {
                rv = -EFAULT;
                goto recv_putback_on_err;
        }
        rsp->addr_len = addr_len;

        rsp->recv_type = msg->recv_type;
        rsp->msgid = msg->msgid;
        rsp->msg.netfn = msg->msg.netfn;
        rsp->msg.cmd = msg->msg.cmd;

        if (msg->msg.data_len > 0) {
                if (rsp->msg.data_len < msg->msg.data_len) {
                        rv = -EMSGSIZE;
                        if (trunc)
                                msg->msg.data_len = rsp->msg.data_len;
                        else
                                goto recv_putback_on_err;
                }

                if (copy_to_user(rsp->msg.data,
                                 msg->msg.data,
                                 msg->msg.data_len)) {
                        rv = -EFAULT;
                        goto recv_putback_on_err;
                }
                rsp->msg.data_len = msg->msg.data_len;
        } else {
                rsp->msg.data_len = 0;
        }

        rv = copyout(rsp, to);
        if (rv)
                goto recv_putback_on_err;

        mutex_unlock(&priv->recv_mutex);
        ipmi_free_recv_msg(msg);
        return 0;

recv_putback_on_err:
        /* If we got an error, put the message back onto
           the head of the queue. */
        spin_lock_irqsave(&(priv->recv_msg_lock), flags);
        list_add(entry, &(priv->recv_msgs));
        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
recv_err:
        mutex_unlock(&priv->recv_mutex);
        return rv;
}

static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
{
        return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
}

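/*
 * Main ioctl dispatcher.  Callers arrive here through ipmi_unlocked_ioctl()
 * (and through compat_ipmi_ioctl() for commands whose layout is identical
 * on 32-bit and 64-bit), so ipmi_mutex is already held.
 */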
static int ipmi_ioctl(struct file   *file,
                      unsigned int  cmd,
                      unsigned long data)
{
        int rv = -EINVAL;
        struct ipmi_file_private *priv = file->private_data;
        void __user *arg = (void __user *)data;

        switch (cmd) {
        case IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req,
                                     priv->default_retries,
                                     priv->default_retry_time_ms);
                break;
        }

        case IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req.req,
                                     req.retries,
                                     req.retry_time_ms);
                break;
        }

        case IPMICTL_RECEIVE_MSG:
        case IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv rsp;

                if (copy_from_user(&rsp, arg, sizeof(rsp)))
                        rv = -EFAULT;
                else
                        rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
                                         &rsp, copyout_recv, arg);
                break;
        }

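        /* Register/unregister to receive specific commands, either on all
           channels or only on the channel mask the user supplies. */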
        case IPMICTL_REGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
                                           IPMI_CHAN_ALL);
                break;
        }

        case IPMICTL_UNREGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
                                             IPMI_CHAN_ALL);
                break;
        }

        case IPMICTL_REGISTER_FOR_CMD_CHANS:
        {
                struct ipmi_cmdspec_chans val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
                                           val.chans);
                break;
        }

        case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
        {
                struct ipmi_cmdspec_chans val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
                                             val.chans);
                break;
        }

        case IPMICTL_SET_GETS_EVENTS_CMD:
        {
                int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_gets_events(priv->user, val);
                break;
        }

        /* The next four are legacy, not per-channel. */
        case IPMICTL_SET_MY_ADDRESS_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_address(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_ADDRESS_CMD:
        {
                unsigned int  val;
                unsigned char rval;

                rv = ipmi_get_my_address(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_LUN_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_LUN_CMD:
        {
                unsigned int  val;
                unsigned char rval;

                rv = ipmi_get_my_LUN(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                return ipmi_set_my_address(priv->user, val.channel, val.value);
        }

        case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
                break;
        }

        case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                if (copy_from_user(&parms, arg, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                priv->default_retries = parms.retries;
                priv->default_retry_time_ms = parms.retry_time_ms;
                rv = 0;
                break;
        }

        case IPMICTL_GET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                parms.retries = priv->default_retries;
                parms.retry_time_ms = priv->default_retry_time_ms;

                if (copy_to_user(arg, &parms, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                rv = 0;
                break;
        }

        case IPMICTL_GET_MAINTENANCE_MODE_CMD:
        {
                int mode;

                mode = ipmi_get_maintenance_mode(priv->user);
                if (copy_to_user(arg, &mode, sizeof(mode))) {
                        rv = -EFAULT;
                        break;
                }
                rv = 0;
                break;
        }

        case IPMICTL_SET_MAINTENANCE_MODE_CMD:
        {
                int mode;

                if (copy_from_user(&mode, arg, sizeof(mode))) {
                        rv = -EFAULT;
                        break;
                }
                rv = ipmi_set_maintenance_mode(priv->user, mode);
                break;
        }
        }

        return rv;
}

/*
 * Note: it doesn't make sense to take the BKL here but
 *       not in compat_ipmi_ioctl. -arnd
 */
static long ipmi_unlocked_ioctl(struct file   *file,
                                unsigned int  cmd,
                                unsigned long data)
{
        int ret;

        mutex_lock(&ipmi_mutex);
        ret = ipmi_ioctl(file, cmd, data);
        mutex_unlock(&ipmi_mutex);

        return ret;
}

#ifdef CONFIG_COMPAT
/*
 * The following code supports 32-bit compatible ioctls on 64-bit kernels,
 * allowing 32-bit applications to run on a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND \
        _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \
        _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG \
        _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \
        _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
        u8            netfn;
        u8            cmd;
        u16           data_len;
        compat_uptr_t data;
};

struct compat_ipmi_req {
        compat_uptr_t          addr;
        compat_uint_t          addr_len;
        compat_long_t          msgid;
        struct compat_ipmi_msg msg;
};

struct compat_ipmi_recv {
        compat_int_t           recv_type;
        compat_uptr_t          addr;
        compat_uint_t          addr_len;
        compat_long_t          msgid;
        struct compat_ipmi_msg msg;
};

struct compat_ipmi_req_settime {
        struct compat_ipmi_req req;
        compat_int_t           retries;
        compat_uint_t          retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static void get_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg *p32)
{
        p64->netfn = p32->netfn;
        p64->cmd = p32->cmd;
        p64->data_len = p32->data_len;
        p64->data = compat_ptr(p32->data);
}

static void get_compat_ipmi_req(struct ipmi_req *p64,
                                struct compat_ipmi_req *p32)
{
        p64->addr = compat_ptr(p32->addr);
        p64->addr_len = p32->addr_len;
        p64->msgid = p32->msgid;
        get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
                                        struct compat_ipmi_req_settime *p32)
{
        get_compat_ipmi_req(&p64->req, &p32->req);
        p64->retries = p32->retries;
        p64->retry_time_ms = p32->retry_time_ms;
}

static void get_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv *p32)
{
        memset(p64, 0, sizeof(struct ipmi_recv));
        p64->recv_type = p32->recv_type;
        p64->addr = compat_ptr(p32->addr);
        p64->addr_len = p32->addr_len;
        p64->msgid = p32->msgid;
        get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
{
        struct compat_ipmi_recv v32;

        memset(&v32, 0, sizeof(struct compat_ipmi_recv));
        v32.recv_type = p64->recv_type;
        v32.addr = ptr_to_compat(p64->addr);
        v32.addr_len = p64->addr_len;
        v32.msgid = p64->msgid;
        v32.msg.netfn = p64->msg.netfn;
        v32.msg.cmd = p64->msg.cmd;
        v32.msg.data_len = p64->msg.data_len;
        v32.msg.data = ptr_to_compat(p64->msg.data);

        return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                              unsigned long arg)
{
        struct ipmi_file_private *priv = filep->private_data;

        switch (cmd) {
        case COMPAT_IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req rp;
                struct compat_ipmi_req r32;

                if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
                        return -EFAULT;

                get_compat_ipmi_req(&rp, &r32);

                return handle_send_req(priv->user, &rp,
                                       priv->default_retries,
                                       priv->default_retry_time_ms);
        }
        case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime sp;
                struct compat_ipmi_req_settime sp32;

                if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
                        return -EFAULT;

                get_compat_ipmi_req_settime(&sp, &sp32);

                return handle_send_req(priv->user, &sp.req,
                                       sp.retries, sp.retry_time_ms);
        }
        case COMPAT_IPMICTL_RECEIVE_MSG:
        case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv recv64;
                struct compat_ipmi_recv recv32;

                if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
                        return -EFAULT;

                get_compat_ipmi_recv(&recv64, &recv32);

                return handle_recv(priv,
                                   cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
                                   &recv64, copyout_recv32, compat_ptr(arg));
        }
        default:
                return ipmi_ioctl(filep, cmd, arg);
        }
}

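/* Same locking as the native path: serialize everything on ipmi_mutex. */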
static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                                       unsigned long arg)
{
        int ret;

        mutex_lock(&ipmi_mutex);
        ret = compat_ipmi_ioctl(filep, cmd, arg);
        mutex_unlock(&ipmi_mutex);

        return ret;
}
#endif

static const struct file_operations ipmi_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = unlocked_compat_ipmi_ioctl,
#endif
        .open           = ipmi_open,
        .release        = ipmi_release,
        .fasync         = ipmi_fasync,
        .poll           = ipmi_poll,
        .llseek         = noop_llseek,
};

#define DEVICE_NAME "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
                 " default, or if you set it to zero, it will choose the next"
                 " available device.  Setting it to -1 will disable the"
                 " interface.  Other values will set the major device number"
                 " to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
        dev_t            dev;
        struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

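/*
 * Called by the SMI watcher when a new IPMI interface appears: create the
 * ipmi<N> class device (so udev can create /dev/ipmi<N>) and remember it
 * on reg_list for later cleanup.
 */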
static void ipmi_new_smi(int if_num, struct device *device)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                printk(KERN_ERR "ipmi_devintf: Unable to create the"
                       " ipmi class device link\n");
                return;
        }
        entry->dev = dev;

        mutex_lock(&reg_list_mutex);
        device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
        list_add(&entry->link, &reg_list);
        mutex_unlock(&reg_list_mutex);
}

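/* An interface went away: drop its reg_list entry and destroy the class device. */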
static void ipmi_smi_gone(int if_num)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        mutex_lock(&reg_list_mutex);
        list_for_each_entry(entry, &reg_list, link) {
                if (entry->dev == dev) {
                        list_del(&entry->link);
                        kfree(entry);
                        break;
                }
        }
        device_destroy(ipmi_class, dev);
        mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
        .owner    = THIS_MODULE,
        .new_smi  = ipmi_new_smi,
        .smi_gone = ipmi_smi_gone,
};

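/*
 * Module init: register the "ipmi" class and the character device, then
 * register the SMI watcher so existing and future interfaces get a device
 * node.  A negative ipmi_major disables the interface; zero picks a
 * dynamically assigned major.
 */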
static int __init init_ipmi_devintf(void)
{
        int rv;

        if (ipmi_major < 0)
                return -EINVAL;

        printk(KERN_INFO "ipmi device interface\n");

        ipmi_class = class_create(THIS_MODULE, "ipmi");
        if (IS_ERR(ipmi_class)) {
                printk(KERN_ERR "ipmi: can't register device class\n");
                return PTR_ERR(ipmi_class);
        }

        rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
        if (rv < 0) {
                class_destroy(ipmi_class);
                printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
                return rv;
        }

        if (ipmi_major == 0)
                ipmi_major = rv;

        rv = ipmi_smi_watcher_register(&smi_watcher);
        if (rv) {
                unregister_chrdev(ipmi_major, DEVICE_NAME);
                class_destroy(ipmi_class);
                printk(KERN_WARNING "ipmi: can't register smi watcher\n");
                return rv;
        }

        return 0;
}
module_init(init_ipmi_devintf);

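/* Module exit: undo everything init_ipmi_devintf() and ipmi_new_smi() set up. */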
static void __exit cleanup_ipmi(void)
{
        struct ipmi_reg_list *entry, *entry2;

        mutex_lock(&reg_list_mutex);
        list_for_each_entry_safe(entry, entry2, &reg_list, link) {
                list_del(&entry->link);
                device_destroy(ipmi_class, entry->dev);
                kfree(entry);
        }
        mutex_unlock(&reg_list_mutex);
        class_destroy(ipmi_class);
        ipmi_smi_watcher_unregister(&smi_watcher);
        unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");