/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

struct ipmi_file_private
{
    ipmi_user_t          user;
    spinlock_t           recv_msg_lock;
    struct list_head     recv_msgs;
    struct file          *file;
    struct fasync_struct *fasync_queue;
    wait_queue_head_t    wait;
    struct mutex         recv_mutex;
    int                  default_retries;
    unsigned int         default_retry_time_ms;
};

static DEFINE_MUTEX(ipmi_mutex);

static void file_receive_handler(struct ipmi_recv_msg *msg,
                                 void                 *handler_data)
{
    struct ipmi_file_private *priv = handler_data;
    int                      was_empty;
    unsigned long            flags;

    spin_lock_irqsave(&(priv->recv_msg_lock), flags);

    was_empty = list_empty(&(priv->recv_msgs));
    list_add_tail(&(msg->link), &(priv->recv_msgs));

    if (was_empty) {
        wake_up_interruptible(&priv->wait);
        kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
    }

    spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}

static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
    struct ipmi_file_private *priv = file->private_data;
    unsigned int             mask = 0;
    unsigned long            flags;

    poll_wait(file, &priv->wait, wait);

    spin_lock_irqsave(&priv->recv_msg_lock, flags);

    if (!list_empty(&(priv->recv_msgs)))
        mask |= (POLLIN | POLLRDNORM);

    spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

    return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
    struct ipmi_file_private *priv = file->private_data;
    int                      result;

    mutex_lock(&ipmi_mutex); /* could race against open() otherwise */
    result = fasync_helper(fd, file, on, &priv->fasync_queue);
    mutex_unlock(&ipmi_mutex);

    return (result);
}
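
/*
 * Illustrative userspace sketch (not part of this driver; kept inside a
 * comment so the file still builds): one way a process might arm the
 * fasync path above so it gets SIGIO whenever file_receive_handler()
 * queues a reply. The device node name "/dev/ipmi0" is an assumption and
 * depends on how udev names the class device created further below.
 *
 *     #include <fcntl.h>
 *     #include <signal.h>
 *     #include <unistd.h>
 *
 *     static void sigio_handler(int sig)
 *     {
 *         // a reply is queued; fetch it with IPMICTL_RECEIVE_MSG_TRUNC
 *     }
 *
 *     int main(void)
 *     {
 *         int fd = open("/dev/ipmi0", O_RDWR);             // hypothetical node
 *         signal(SIGIO, sigio_handler);
 *         fcntl(fd, F_SETOWN, getpid());                   // route SIGIO to us
 *         fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC); // ends up in ipmi_fasync()
 *         for (;;)
 *             pause();                                     // wait for SIGIO
 *     }
 */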

static struct ipmi_user_hndl ipmi_hndlrs =
{
    .ipmi_recv_hndl = file_receive_handler,
};

static int ipmi_open(struct inode *inode, struct file *file)
{
    int                      if_num = iminor(inode);
    int                      rv;
    struct ipmi_file_private *priv;

    priv = kmalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;

    mutex_lock(&ipmi_mutex);
    priv->file = file;

    rv = ipmi_create_user(if_num,
                          &ipmi_hndlrs,
                          priv,
                          &(priv->user));
    if (rv) {
        kfree(priv);
        goto out;
    }

    file->private_data = priv;

    spin_lock_init(&(priv->recv_msg_lock));
    INIT_LIST_HEAD(&(priv->recv_msgs));
    init_waitqueue_head(&priv->wait);
    priv->fasync_queue = NULL;
    mutex_init(&priv->recv_mutex);

    /* Use the low-level defaults. */
    priv->default_retries = -1;
    priv->default_retry_time_ms = 0;

out:
    mutex_unlock(&ipmi_mutex);
    return rv;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
    struct ipmi_file_private *priv = file->private_data;
    int                      rv;

    rv = ipmi_destroy_user(priv->user);
    if (rv)
        return rv;

    /* FIXME - free the messages in the list. */
    kfree(priv);

    return 0;
}

static int handle_send_req(ipmi_user_t     user,
                           struct ipmi_req *req,
                           int             retries,
                           unsigned int    retry_time_ms)
{
    int              rv;
    struct ipmi_addr addr;
    struct kernel_ipmi_msg msg;

    if (req->addr_len > sizeof(struct ipmi_addr))
        return -EINVAL;

    if (copy_from_user(&addr, req->addr, req->addr_len))
        return -EFAULT;

    msg.netfn = req->msg.netfn;
    msg.cmd = req->msg.cmd;
    msg.data_len = req->msg.data_len;
    msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
    if (!msg.data)
        return -ENOMEM;

    /* From here on we cannot return directly; error exits must jump to
       "out" so that msg.data is freed. */

    rv = ipmi_validate_addr(&addr, req->addr_len);
    if (rv)
        goto out;

    if (req->msg.data != NULL) {
        if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
            rv = -EMSGSIZE;
            goto out;
        }

        if (copy_from_user(msg.data,
                           req->msg.data,
                           req->msg.data_len)) {
            rv = -EFAULT;
            goto out;
        }
    } else {
        msg.data_len = 0;
    }

    rv = ipmi_request_settime(user,
                              &addr,
                              req->msgid,
                              &msg,
                              NULL,
                              0,        /* priority */
                              retries,
                              retry_time_ms);
out:
    kfree(msg.data);
    return rv;
}
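
/*
 * Illustrative userspace sketch (not part of this driver; kept inside a
 * comment so the file still builds): submitting a Get Device ID request
 * with IPMICTL_SEND_COMMAND, which lands in handle_send_req() above via
 * ipmi_ioctl(). The device node name is an assumption; the netfn/cmd
 * values are the standard App / Get Device ID pair.
 *
 *     #include <linux/ipmi.h>
 *     #include <sys/ioctl.h>
 *     #include <fcntl.h>
 *     #include <string.h>
 *
 *     struct ipmi_system_interface_addr bmc_addr;
 *     struct ipmi_req req;
 *     int fd = open("/dev/ipmi0", O_RDWR);     // hypothetical node
 *
 *     memset(&bmc_addr, 0, sizeof(bmc_addr));
 *     bmc_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *     bmc_addr.channel = IPMI_BMC_CHANNEL;
 *
 *     memset(&req, 0, sizeof(req));
 *     req.addr = (unsigned char *) &bmc_addr;
 *     req.addr_len = sizeof(bmc_addr);
 *     req.msgid = 1;                           // echoed back in the reply
 *     req.msg.netfn = 0x06;                    // App request netfn
 *     req.msg.cmd = 0x01;                      // Get Device ID
 *     req.msg.data = NULL;
 *     req.msg.data_len = 0;
 *
 *     ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 */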

static int ipmi_ioctl(struct file   *file,
                      unsigned int  cmd,
                      unsigned long data)
{
    int                      rv = -EINVAL;
    struct ipmi_file_private *priv = file->private_data;
    void __user *arg = (void __user *)data;

    switch (cmd)
    {
    case IPMICTL_SEND_COMMAND:
    {
        struct ipmi_req req;

        if (copy_from_user(&req, arg, sizeof(req))) {
            rv = -EFAULT;
            break;
        }

        rv = handle_send_req(priv->user,
                             &req,
                             priv->default_retries,
                             priv->default_retry_time_ms);
        break;
    }

    case IPMICTL_SEND_COMMAND_SETTIME:
    {
        struct ipmi_req_settime req;

        if (copy_from_user(&req, arg, sizeof(req))) {
            rv = -EFAULT;
            break;
        }

        rv = handle_send_req(priv->user,
                             &req.req,
                             req.retries,
                             req.retry_time_ms);
        break;
    }

    case IPMICTL_RECEIVE_MSG:
    case IPMICTL_RECEIVE_MSG_TRUNC:
    {
        struct ipmi_recv     rsp;
        int                  addr_len;
        struct list_head     *entry;
        struct ipmi_recv_msg *msg;
        unsigned long        flags;

        rv = 0;
        if (copy_from_user(&rsp, arg, sizeof(rsp))) {
            rv = -EFAULT;
            break;
        }

        /* We claim a mutex because we don't want two
           users getting something from the queue at a time.
           Since we have to release the spinlock before we can
           copy the data to the user, it's possible another
           user will grab something from the queue, too. Then
           the messages might get out of order if something
           fails and the message gets put back onto the
           queue. This mutex prevents that problem. */
        mutex_lock(&priv->recv_mutex);

        /* Grab the message off the list. */
        spin_lock_irqsave(&(priv->recv_msg_lock), flags);
        if (list_empty(&(priv->recv_msgs))) {
            spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
            rv = -EAGAIN;
            goto recv_err;
        }
        entry = priv->recv_msgs.next;
        msg = list_entry(entry, struct ipmi_recv_msg, link);
        list_del(entry);
        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

        addr_len = ipmi_addr_length(msg->addr.addr_type);
        if (rsp.addr_len < addr_len) {
            rv = -EINVAL;
            goto recv_putback_on_err;
        }

        if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
            rv = -EFAULT;
            goto recv_putback_on_err;
        }
        rsp.addr_len = addr_len;

        rsp.recv_type = msg->recv_type;
        rsp.msgid = msg->msgid;
        rsp.msg.netfn = msg->msg.netfn;
        rsp.msg.cmd = msg->msg.cmd;

        if (msg->msg.data_len > 0) {
            if (rsp.msg.data_len < msg->msg.data_len) {
                rv = -EMSGSIZE;
                if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
                    msg->msg.data_len = rsp.msg.data_len;
                } else {
                    goto recv_putback_on_err;
                }
            }

            if (copy_to_user(rsp.msg.data,
                             msg->msg.data,
                             msg->msg.data_len)) {
                rv = -EFAULT;
                goto recv_putback_on_err;
            }
            rsp.msg.data_len = msg->msg.data_len;
        } else {
            rsp.msg.data_len = 0;
        }

        if (copy_to_user(arg, &rsp, sizeof(rsp))) {
            rv = -EFAULT;
            goto recv_putback_on_err;
        }

        mutex_unlock(&priv->recv_mutex);
        ipmi_free_recv_msg(msg);
        break;

    recv_putback_on_err:
        /* If we got an error, put the message back onto
           the head of the queue. */
        spin_lock_irqsave(&(priv->recv_msg_lock), flags);
        list_add(entry, &(priv->recv_msgs));
        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
        mutex_unlock(&priv->recv_mutex);
        break;

    recv_err:
        mutex_unlock(&priv->recv_mutex);
        break;
    }

    case IPMICTL_REGISTER_FOR_CMD:
    {
        struct ipmi_cmdspec val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
                                   IPMI_CHAN_ALL);
        break;
    }

    case IPMICTL_UNREGISTER_FOR_CMD:
    {
        struct ipmi_cmdspec val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
                                     IPMI_CHAN_ALL);
        break;
    }

    case IPMICTL_REGISTER_FOR_CMD_CHANS:
    {
        struct ipmi_cmdspec_chans val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
                                   val.chans);
        break;
    }

    case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
    {
        struct ipmi_cmdspec_chans val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
                                     val.chans);
        break;
    }

    case IPMICTL_SET_GETS_EVENTS_CMD:
    {
        int val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        rv = ipmi_set_gets_events(priv->user, val);
        break;
    }

    /* The next four are legacy, not per-channel. */
    case IPMICTL_SET_MY_ADDRESS_CMD:
    {
        unsigned int val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        rv = ipmi_set_my_address(priv->user, 0, val);
        break;
    }

    case IPMICTL_GET_MY_ADDRESS_CMD:
    {
        unsigned int  val;
        unsigned char rval;

        rv = ipmi_get_my_address(priv->user, 0, &rval);
        if (rv)
            break;

        val = rval;

        if (copy_to_user(arg, &val, sizeof(val))) {
            rv = -EFAULT;
            break;
        }
        break;
    }

    case IPMICTL_SET_MY_LUN_CMD:
    {
        unsigned int val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        rv = ipmi_set_my_LUN(priv->user, 0, val);
        break;
    }

    case IPMICTL_GET_MY_LUN_CMD:
    {
        unsigned int  val;
        unsigned char rval;

        rv = ipmi_get_my_LUN(priv->user, 0, &rval);
        if (rv)
            break;

        val = rval;

        if (copy_to_user(arg, &val, sizeof(val))) {
            rv = -EFAULT;
            break;
        }
        break;
    }

    case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
    {
        struct ipmi_channel_lun_address_set val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        return ipmi_set_my_address(priv->user, val.channel, val.value);
        break;
    }

    case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
    {
        struct ipmi_channel_lun_address_set val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
        if (rv)
            break;

        if (copy_to_user(arg, &val, sizeof(val))) {
            rv = -EFAULT;
            break;
        }
        break;
    }

    case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
    {
        struct ipmi_channel_lun_address_set val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
        break;
    }

    case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
    {
        struct ipmi_channel_lun_address_set val;

        if (copy_from_user(&val, arg, sizeof(val))) {
            rv = -EFAULT;
            break;
        }

        rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
        if (rv)
            break;

        if (copy_to_user(arg, &val, sizeof(val))) {
            rv = -EFAULT;
            break;
        }
        break;
    }

    case IPMICTL_SET_TIMING_PARMS_CMD:
    {
        struct ipmi_timing_parms parms;

        if (copy_from_user(&parms, arg, sizeof(parms))) {
            rv = -EFAULT;
            break;
        }

        priv->default_retries = parms.retries;
        priv->default_retry_time_ms = parms.retry_time_ms;
        rv = 0;
        break;
    }

    case IPMICTL_GET_TIMING_PARMS_CMD:
    {
        struct ipmi_timing_parms parms;

        parms.retries = priv->default_retries;
        parms.retry_time_ms = priv->default_retry_time_ms;

        if (copy_to_user(arg, &parms, sizeof(parms))) {
            rv = -EFAULT;
            break;
        }

        rv = 0;
        break;
    }

    case IPMICTL_GET_MAINTENANCE_MODE_CMD:
    {
        int mode;

        mode = ipmi_get_maintenance_mode(priv->user);
        if (copy_to_user(arg, &mode, sizeof(mode))) {
            rv = -EFAULT;
            break;
        }
        rv = 0;
        break;
    }

    case IPMICTL_SET_MAINTENANCE_MODE_CMD:
    {
        int mode;

        if (copy_from_user(&mode, arg, sizeof(mode))) {
            rv = -EFAULT;
            break;
        }
        rv = ipmi_set_maintenance_mode(priv->user, mode);
        break;
    }
    }

    return rv;
}
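
/*
 * Illustrative userspace sketch (not part of this driver; kept inside a
 * comment so the file still builds): collecting the reply to a request
 * sent earlier. The receive ioctls above return -EAGAIN when the queue is
 * empty, so a caller normally waits with poll() first; ipmi_poll() reports
 * POLLIN once file_receive_handler() has queued a message. The device node
 * name and the use of a maximum-sized buffer are assumptions.
 *
 *     #include <linux/ipmi.h>
 *     #include <sys/ioctl.h>
 *     #include <poll.h>
 *
 *     struct ipmi_addr addr;
 *     unsigned char data[IPMI_MAX_MSG_LENGTH];
 *     struct ipmi_recv recv;
 *     struct pollfd pfd = { .fd = fd, .events = POLLIN };  // fd from the send sketch
 *
 *     recv.addr = (unsigned char *) &addr;
 *     recv.addr_len = sizeof(addr);
 *     recv.msg.data = data;
 *     recv.msg.data_len = sizeof(data);
 *
 *     poll(&pfd, 1, -1);                           // block until a reply is queued
 *     ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv);
 *     // recv.recv_type is IPMI_RESPONSE_RECV_TYPE for a command reply and
 *     // recv.msg.data[0] holds the IPMI completion code.
 */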

/*
 * Note: it doesn't make sense to take the BKL here but
 *       not in compat_ipmi_ioctl. -arnd
 */
static long ipmi_unlocked_ioctl(struct file   *file,
                                unsigned int  cmd,
                                unsigned long data)
{
    int ret;

    mutex_lock(&ipmi_mutex);
    ret = ipmi_ioctl(file, cmd, data);
    mutex_unlock(&ipmi_mutex);

    return ret;
}

#ifdef CONFIG_COMPAT

/*
 * The following supports 32-bit compatible ioctls on 64-bit kernels,
 * which allows 32-bit applications to run against a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND \
    _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \
    _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG \
    _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \
    _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
    u8            netfn;
    u8            cmd;
    u16           data_len;
    compat_uptr_t data;
};

struct compat_ipmi_req {
    compat_uptr_t          addr;
    compat_uint_t          addr_len;
    compat_long_t          msgid;
    struct compat_ipmi_msg msg;
};

struct compat_ipmi_recv {
    compat_int_t           recv_type;
    compat_uptr_t          addr;
    compat_uint_t          addr_len;
    compat_long_t          msgid;
    struct compat_ipmi_msg msg;
};

struct compat_ipmi_req_settime {
    struct compat_ipmi_req req;
    compat_int_t           retries;
    compat_uint_t          retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static long get_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg __user *p32)
{
    compat_uptr_t tmp;

    if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(p64->netfn, &p32->netfn) ||
            __get_user(p64->cmd, &p32->cmd) ||
            __get_user(p64->data_len, &p32->data_len) ||
            __get_user(tmp, &p32->data))
        return -EFAULT;
    p64->data = compat_ptr(tmp);
    return 0;
}

static long put_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg __user *p32)
{
    if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
            __put_user(p64->netfn, &p32->netfn) ||
            __put_user(p64->cmd, &p32->cmd) ||
            __put_user(p64->data_len, &p32->data_len))
        return -EFAULT;
    return 0;
}

static long get_compat_ipmi_req(struct ipmi_req *p64,
                                struct compat_ipmi_req __user *p32)
{
    compat_uptr_t tmp;

    if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(tmp, &p32->addr) ||
            __get_user(p64->addr_len, &p32->addr_len) ||
            __get_user(p64->msgid, &p32->msgid) ||
            get_compat_ipmi_msg(&p64->msg, &p32->msg))
        return -EFAULT;
    p64->addr = compat_ptr(tmp);
    return 0;
}

static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
                                        struct compat_ipmi_req_settime __user *p32)
{
    if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            get_compat_ipmi_req(&p64->req, &p32->req) ||
            __get_user(p64->retries, &p32->retries) ||
            __get_user(p64->retry_time_ms, &p32->retry_time_ms))
        return -EFAULT;
    return 0;
}

static long get_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv __user *p32)
{
    compat_uptr_t tmp;

    if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(p64->recv_type, &p32->recv_type) ||
            __get_user(tmp, &p32->addr) ||
            __get_user(p64->addr_len, &p32->addr_len) ||
            __get_user(p64->msgid, &p32->msgid) ||
            get_compat_ipmi_msg(&p64->msg, &p32->msg))
        return -EFAULT;
    p64->addr = compat_ptr(tmp);
    return 0;
}

static long put_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv __user *p32)
{
    if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
            __put_user(p64->recv_type, &p32->recv_type) ||
            __put_user(p64->addr_len, &p32->addr_len) ||
            __put_user(p64->msgid, &p32->msgid) ||
            put_compat_ipmi_msg(&p64->msg, &p32->msg))
        return -EFAULT;
    return 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                              unsigned long arg)
{
    int rc;
    struct ipmi_file_private *priv = filep->private_data;

    switch(cmd) {
    case COMPAT_IPMICTL_SEND_COMMAND:
    {
        struct ipmi_req rp;

        if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
            return -EFAULT;

        return handle_send_req(priv->user, &rp,
                               priv->default_retries,
                               priv->default_retry_time_ms);
    }
    case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
    {
        struct ipmi_req_settime sp;

        if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
            return -EFAULT;

        return handle_send_req(priv->user, &sp.req,
                               sp.retries, sp.retry_time_ms);
    }
    case COMPAT_IPMICTL_RECEIVE_MSG:
    case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
    {
        struct ipmi_recv __user *precv64;
        struct ipmi_recv        recv64;

        if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
            return -EFAULT;

        precv64 = compat_alloc_user_space(sizeof(recv64));
        if (copy_to_user(precv64, &recv64, sizeof(recv64)))
            return -EFAULT;

        rc = ipmi_ioctl(filep,
                        ((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
                         ? IPMICTL_RECEIVE_MSG
                         : IPMICTL_RECEIVE_MSG_TRUNC),
                        (unsigned long) precv64);
        if (rc != 0)
            return rc;

        if (copy_from_user(&recv64, precv64, sizeof(recv64)))
            return -EFAULT;

        if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
            return -EFAULT;

        return rc;
    }
    default:
        return ipmi_ioctl(filep, cmd, arg);
    }
}
#endif

static const struct file_operations ipmi_fops = {
    .owner          = THIS_MODULE,
    .unlocked_ioctl = ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl   = compat_ipmi_ioctl,
#endif
    .open           = ipmi_open,
    .release        = ipmi_release,
    .fasync         = ipmi_fasync,
    .poll           = ipmi_poll,
    .llseek         = noop_llseek,
};

#define DEVICE_NAME "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
                 " default, or if you set it to zero, it will choose the next"
                 " available device. Setting it to -1 will disable the"
                 " interface. Other values will set the major device number"
                 " to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
    dev_t            dev;
    struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

static void ipmi_new_smi(int if_num, struct device *device)
{
    dev_t dev = MKDEV(ipmi_major, if_num);
    struct ipmi_reg_list *entry;

    entry = kmalloc(sizeof(*entry), GFP_KERNEL);
    if (!entry) {
        printk(KERN_ERR "ipmi_devintf: Unable to create the"
               " ipmi class device link\n");
        return;
    }
    entry->dev = dev;

    mutex_lock(&reg_list_mutex);
    device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
    list_add(&entry->link, &reg_list);
    mutex_unlock(&reg_list_mutex);
}

static void ipmi_smi_gone(int if_num)
{
    dev_t dev = MKDEV(ipmi_major, if_num);
    struct ipmi_reg_list *entry;

    mutex_lock(&reg_list_mutex);
    list_for_each_entry(entry, &reg_list, link) {
        if (entry->dev == dev) {
            list_del(&entry->link);
            kfree(entry);
            break;
        }
    }
    device_destroy(ipmi_class, dev);
    mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
    .owner    = THIS_MODULE,
    .new_smi  = ipmi_new_smi,
    .smi_gone = ipmi_smi_gone,
};

static int __init init_ipmi_devintf(void)
{
    int rv;

    if (ipmi_major < 0)
        return -EINVAL;

    printk(KERN_INFO "ipmi device interface\n");

    ipmi_class = class_create(THIS_MODULE, "ipmi");
    if (IS_ERR(ipmi_class)) {
        printk(KERN_ERR "ipmi: can't register device class\n");
        return PTR_ERR(ipmi_class);
    }

    rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
    if (rv < 0) {
        class_destroy(ipmi_class);
        printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
        return rv;
    }

    if (ipmi_major == 0) {
        ipmi_major = rv;
    }

    rv = ipmi_smi_watcher_register(&smi_watcher);
    if (rv) {
        unregister_chrdev(ipmi_major, DEVICE_NAME);
        class_destroy(ipmi_class);
        printk(KERN_WARNING "ipmi: can't register smi watcher\n");
        return rv;
    }

    return 0;
}
module_init(init_ipmi_devintf);

static void __exit cleanup_ipmi(void)
{
    struct ipmi_reg_list *entry, *entry2;
    mutex_lock(&reg_list_mutex);
    list_for_each_entry_safe(entry, entry2, &reg_list, link) {
        list_del(&entry->link);
        device_destroy(ipmi_class, entry->dev);
        kfree(entry);
    }
    mutex_unlock(&reg_list_mutex);
    class_destroy(ipmi_class);
    ipmi_smi_watcher_unregister(&smi_watcher);
    unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
MODULE_ALIAS("platform:ipmi_si");