/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

struct ipmi_file_private
{
        ipmi_user_t          user;
        spinlock_t           recv_msg_lock;
        struct list_head     recv_msgs;
        struct file          *file;
        struct fasync_struct *fasync_queue;
        wait_queue_head_t    wait;
        struct mutex         recv_mutex;
        int                  default_retries;
        unsigned int         default_retry_time_ms;
};

static DEFINE_MUTEX(ipmi_mutex);
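
/*
 * Receive callback registered with the message handler: queue the
 * incoming message on the per-file list and wake up anyone waiting
 * in poll()/select() or via SIGIO.
 */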
static void file_receive_handler(struct ipmi_recv_msg *msg,
                                 void                 *handler_data)
{
        struct ipmi_file_private *priv = handler_data;
        int                      was_empty;
        unsigned long            flags;

        spin_lock_irqsave(&(priv->recv_msg_lock), flags);

        was_empty = list_empty(&(priv->recv_msgs));
        list_add_tail(&(msg->link), &(priv->recv_msgs));

        if (was_empty) {
                wake_up_interruptible(&priv->wait);
                kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
        }

        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}

static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
        struct ipmi_file_private *priv = file->private_data;
        unsigned int             mask = 0;
        unsigned long            flags;

        poll_wait(file, &priv->wait, wait);

        spin_lock_irqsave(&priv->recv_msg_lock, flags);

        if (!list_empty(&(priv->recv_msgs)))
                mask |= (POLLIN | POLLRDNORM);

        spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

        return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
        struct ipmi_file_private *priv = file->private_data;
        int                      result;

        mutex_lock(&ipmi_mutex); /* could race against open() otherwise */
        result = fasync_helper(fd, file, on, &priv->fasync_queue);
        mutex_unlock(&ipmi_mutex);

        return result;
}

static struct ipmi_user_hndl ipmi_hndlrs =
{
        .ipmi_recv_hndl = file_receive_handler,
};

static int ipmi_open(struct inode *inode, struct file *file)
{
        int                      if_num = iminor(inode);
        int                      rv;
        struct ipmi_file_private *priv;

        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        mutex_lock(&ipmi_mutex);
        priv->file = file;

        rv = ipmi_create_user(if_num,
                              &ipmi_hndlrs,
                              priv,
                              &(priv->user));
        if (rv) {
                kfree(priv);
                goto out;
        }

        file->private_data = priv;

        spin_lock_init(&(priv->recv_msg_lock));
        INIT_LIST_HEAD(&(priv->recv_msgs));
        init_waitqueue_head(&priv->wait);
        priv->fasync_queue = NULL;
        mutex_init(&priv->recv_mutex);

        /* Use the low-level defaults. */
        priv->default_retries = -1;
        priv->default_retry_time_ms = 0;

 out:
        mutex_unlock(&ipmi_mutex);
        return rv;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
        struct ipmi_file_private *priv = file->private_data;
        int                      rv;

        rv = ipmi_destroy_user(priv->user);
        if (rv)
                return rv;

        /* FIXME - free the messages in the list. */
        kfree(priv);

        return 0;
}
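
/*
 * Common helper for the SEND_COMMAND ioctls: copy the destination
 * address and message payload in from user space, validate them, and
 * submit the request via ipmi_request_settime().
 */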
static int handle_send_req(ipmi_user_t     user,
                           struct ipmi_req *req,
                           int             retries,
                           unsigned int    retry_time_ms)
{
        int              rv;
        struct ipmi_addr addr;
        struct kernel_ipmi_msg msg;

        if (req->addr_len > sizeof(struct ipmi_addr))
                return -EINVAL;

        if (copy_from_user(&addr, req->addr, req->addr_len))
                return -EFAULT;

        msg.netfn = req->msg.netfn;
        msg.cmd = req->msg.cmd;
        msg.data_len = req->msg.data_len;
        msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
        if (!msg.data)
                return -ENOMEM;

        /* From here out we cannot return, we must jump to "out" for
           error exits to free msg.data. */

        rv = ipmi_validate_addr(&addr, req->addr_len);
        if (rv)
                goto out;

        if (req->msg.data != NULL) {
                if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
                        rv = -EMSGSIZE;
                        goto out;
                }

                if (copy_from_user(msg.data,
                                   req->msg.data,
                                   req->msg.data_len)) {
                        rv = -EFAULT;
                        goto out;
                }
        } else {
                msg.data_len = 0;
        }

        rv = ipmi_request_settime(user,
                                  &addr,
                                  req->msgid,
                                  &msg,
                                  NULL,
                                  0,
                                  retries,
                                  retry_time_ms);
 out:
        kfree(msg.data);
        return rv;
}
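
/*
 * Main ioctl dispatcher.  Called with ipmi_mutex held by
 * ipmi_unlocked_ioctl() (and by the compat path), so only one ioctl
 * is processed at a time.
 */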
static int ipmi_ioctl(struct file   *file,
                      unsigned int  cmd,
                      unsigned long data)
{
        int                      rv = -EINVAL;
        struct ipmi_file_private *priv = file->private_data;
        void __user *arg = (void __user *)data;

        switch (cmd)
        {
        case IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req,
                                     priv->default_retries,
                                     priv->default_retry_time_ms);
                break;
        }

        case IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req.req,
                                     req.retries,
                                     req.retry_time_ms);
                break;
        }

        case IPMICTL_RECEIVE_MSG:
        case IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv      rsp;
                int                   addr_len;
                struct list_head      *entry;
                struct ipmi_recv_msg  *msg;
                unsigned long         flags;

                rv = 0;
                if (copy_from_user(&rsp, arg, sizeof(rsp))) {
                        rv = -EFAULT;
                        break;
                }

                /* We claim a mutex because we don't want two
                   users getting something from the queue at a time.
                   Since we have to release the spinlock before we can
                   copy the data to the user, it's possible another
                   user will grab something from the queue, too.  Then
                   the messages might get out of order if something
                   fails and the message gets put back onto the
                   queue.  This mutex prevents that problem. */
                mutex_lock(&priv->recv_mutex);

                /* Grab the message off the list. */
                spin_lock_irqsave(&(priv->recv_msg_lock), flags);
                if (list_empty(&(priv->recv_msgs))) {
                        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
                        rv = -EAGAIN;
                        goto recv_err;
                }
                entry = priv->recv_msgs.next;
                msg = list_entry(entry, struct ipmi_recv_msg, link);
                list_del(entry);
                spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

                addr_len = ipmi_addr_length(msg->addr.addr_type);
                if (rsp.addr_len < addr_len) {
                        rv = -EINVAL;
                        goto recv_putback_on_err;
                }

                if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
                        rv = -EFAULT;
                        goto recv_putback_on_err;
                }
                rsp.addr_len = addr_len;

                rsp.recv_type = msg->recv_type;
                rsp.msgid = msg->msgid;
                rsp.msg.netfn = msg->msg.netfn;
                rsp.msg.cmd = msg->msg.cmd;

                if (msg->msg.data_len > 0) {
                        if (rsp.msg.data_len < msg->msg.data_len) {
                                rv = -EMSGSIZE;
                                if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
                                        msg->msg.data_len = rsp.msg.data_len;
                                } else {
                                        goto recv_putback_on_err;
                                }
                        }

                        if (copy_to_user(rsp.msg.data,
                                         msg->msg.data,
                                         msg->msg.data_len)) {
                                rv = -EFAULT;
                                goto recv_putback_on_err;
                        }
                        rsp.msg.data_len = msg->msg.data_len;
                } else {
                        rsp.msg.data_len = 0;
                }

                if (copy_to_user(arg, &rsp, sizeof(rsp))) {
                        rv = -EFAULT;
                        goto recv_putback_on_err;
                }

                mutex_unlock(&priv->recv_mutex);
                ipmi_free_recv_msg(msg);
                break;

        recv_putback_on_err:
                /* If we got an error, put the message back onto
                   the head of the queue. */
                spin_lock_irqsave(&(priv->recv_msg_lock), flags);
                list_add(entry, &(priv->recv_msgs));
                spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
                mutex_unlock(&priv->recv_mutex);
                break;

        recv_err:
                mutex_unlock(&priv->recv_mutex);
                break;
        }

        case IPMICTL_REGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
                                           IPMI_CHAN_ALL);
                break;
        }

        case IPMICTL_UNREGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
                                             IPMI_CHAN_ALL);
                break;
        }

        case IPMICTL_REGISTER_FOR_CMD_CHANS:
        {
                struct ipmi_cmdspec_chans val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
                                           val.chans);
                break;
        }

        case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
        {
                struct ipmi_cmdspec_chans val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
                                             val.chans);
                break;
        }

        case IPMICTL_SET_GETS_EVENTS_CMD:
        {
                int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_gets_events(priv->user, val);
                break;
        }

        /* The next four are legacy, not per-channel. */
        case IPMICTL_SET_MY_ADDRESS_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_address(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_ADDRESS_CMD:
        {
                unsigned int  val;
                unsigned char rval;

                rv = ipmi_get_my_address(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_LUN_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_LUN_CMD:
        {
                unsigned int  val;
                unsigned char rval;

                rv = ipmi_get_my_LUN(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_address(priv->user, val.channel, val.value);
                break;
        }

        case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
                break;
        }

        case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                if (copy_from_user(&parms, arg, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                priv->default_retries = parms.retries;
                priv->default_retry_time_ms = parms.retry_time_ms;
                rv = 0;
                break;
        }

        case IPMICTL_GET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                parms.retries = priv->default_retries;
                parms.retry_time_ms = priv->default_retry_time_ms;

                if (copy_to_user(arg, &parms, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                rv = 0;
                break;
        }

        case IPMICTL_GET_MAINTENANCE_MODE_CMD:
        {
                int mode;

                mode = ipmi_get_maintenance_mode(priv->user);
                if (copy_to_user(arg, &mode, sizeof(mode))) {
                        rv = -EFAULT;
                        break;
                }
                rv = 0;
                break;
        }

        case IPMICTL_SET_MAINTENANCE_MODE_CMD:
        {
                int mode;

                if (copy_from_user(&mode, arg, sizeof(mode))) {
                        rv = -EFAULT;
                        break;
                }
                rv = ipmi_set_maintenance_mode(priv->user, mode);
                break;
        }
        }

        return rv;
}

/*
 * Note: it doesn't make sense to take the BKL here but
 *       not in compat_ipmi_ioctl. -arnd
 */
static long ipmi_unlocked_ioctl(struct file   *file,
                                unsigned int  cmd,
                                unsigned long data)
{
        int ret;

        mutex_lock(&ipmi_mutex);
        ret = ipmi_ioctl(file, cmd, data);
        mutex_unlock(&ipmi_mutex);

        return ret;
}

#ifdef CONFIG_COMPAT

/*
 * The following code supports 32-bit compatible ioctls on 64-bit
 * kernels.  This allows running 32-bit apps on a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
        _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
        _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
        _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
        _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
        u8            netfn;
        u8            cmd;
        u16           data_len;
        compat_uptr_t data;
};

struct compat_ipmi_req {
        compat_uptr_t          addr;
        compat_uint_t          addr_len;
        compat_long_t          msgid;
        struct compat_ipmi_msg msg;
};

struct compat_ipmi_recv {
        compat_int_t           recv_type;
        compat_uptr_t          addr;
        compat_uint_t          addr_len;
        compat_long_t          msgid;
        struct compat_ipmi_msg msg;
};

struct compat_ipmi_req_settime {
        struct compat_ipmi_req req;
        compat_int_t           retries;
        compat_uint_t          retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static long get_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(p64->netfn, &p32->netfn) ||
            __get_user(p64->cmd, &p32->cmd) ||
            __get_user(p64->data_len, &p32->data_len) ||
            __get_user(tmp, &p32->data))
                return -EFAULT;
        p64->data = compat_ptr(tmp);
        return 0;
}

static long put_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg __user *p32)
{
        if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
            __put_user(p64->netfn, &p32->netfn) ||
            __put_user(p64->cmd, &p32->cmd) ||
            __put_user(p64->data_len, &p32->data_len))
                return -EFAULT;
        return 0;
}

static long get_compat_ipmi_req(struct ipmi_req *p64,
                                struct compat_ipmi_req __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(tmp, &p32->addr) ||
            __get_user(p64->addr_len, &p32->addr_len) ||
            __get_user(p64->msgid, &p32->msgid) ||
            get_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        p64->addr = compat_ptr(tmp);
        return 0;
}

static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
                                        struct compat_ipmi_req_settime __user *p32)
{
        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            get_compat_ipmi_req(&p64->req, &p32->req) ||
            __get_user(p64->retries, &p32->retries) ||
            __get_user(p64->retry_time_ms, &p32->retry_time_ms))
                return -EFAULT;
        return 0;
}

static long get_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(p64->recv_type, &p32->recv_type) ||
            __get_user(tmp, &p32->addr) ||
            __get_user(p64->addr_len, &p32->addr_len) ||
            __get_user(p64->msgid, &p32->msgid) ||
            get_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        p64->addr = compat_ptr(tmp);
        return 0;
}

static long put_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv __user *p32)
{
        if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
            __put_user(p64->recv_type, &p32->recv_type) ||
            __put_user(p64->addr_len, &p32->addr_len) ||
            __put_user(p64->msgid, &p32->msgid) ||
            put_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        return 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                              unsigned long arg)
{
        int rc;
        struct ipmi_file_private *priv = filep->private_data;

        switch (cmd) {
        case COMPAT_IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req rp;

                if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
                        return -EFAULT;

                return handle_send_req(priv->user, &rp,
                                       priv->default_retries,
                                       priv->default_retry_time_ms);
        }
        case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime sp;

                if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
                        return -EFAULT;

                return handle_send_req(priv->user, &sp.req,
                                       sp.retries, sp.retry_time_ms);
        }
        case COMPAT_IPMICTL_RECEIVE_MSG:
        case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv __user *precv64;
                struct ipmi_recv recv64;

                memset(&recv64, 0, sizeof(recv64));
                if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
                        return -EFAULT;

                precv64 = compat_alloc_user_space(sizeof(recv64));
                if (copy_to_user(precv64, &recv64, sizeof(recv64)))
                        return -EFAULT;

                rc = ipmi_ioctl(filep,
                                ((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
                                 ? IPMICTL_RECEIVE_MSG
                                 : IPMICTL_RECEIVE_MSG_TRUNC),
                                (unsigned long) precv64);
                if (rc != 0)
                        return rc;

                if (copy_from_user(&recv64, precv64, sizeof(recv64)))
                        return -EFAULT;

                if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
                        return -EFAULT;

                return rc;
        }
        default:
                return ipmi_ioctl(filep, cmd, arg);
        }
}

static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                                       unsigned long arg)
{
        int ret;

        mutex_lock(&ipmi_mutex);
        ret = compat_ipmi_ioctl(filep, cmd, arg);
        mutex_unlock(&ipmi_mutex);

        return ret;
}
#endif

static const struct file_operations ipmi_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = unlocked_compat_ipmi_ioctl,
#endif
        .open           = ipmi_open,
        .release        = ipmi_release,
        .fasync         = ipmi_fasync,
        .poll           = ipmi_poll,
        .llseek         = noop_llseek,
};

#define DEVICE_NAME     "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
                 " default, or if you set it to zero, it will choose the next"
                 " available device.  Setting it to -1 will disable the"
                 " interface.  Other values will set the major device number"
                 " to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
        dev_t            dev;
        struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;
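
/*
 * SMI watcher callbacks: create or remove the /dev/ipmiN device node
 * (and its class device) as IPMI interfaces come and go.
 */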
static void ipmi_new_smi(int if_num, struct device *device)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                printk(KERN_ERR "ipmi_devintf: Unable to create the"
                       " ipmi class device link\n");
                return;
        }
        entry->dev = dev;

        mutex_lock(&reg_list_mutex);
        device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
        list_add(&entry->link, &reg_list);
        mutex_unlock(&reg_list_mutex);
}

static void ipmi_smi_gone(int if_num)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        mutex_lock(&reg_list_mutex);
        list_for_each_entry(entry, &reg_list, link) {
                if (entry->dev == dev) {
                        list_del(&entry->link);
                        kfree(entry);
                        break;
                }
        }
        device_destroy(ipmi_class, dev);
        mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
        .owner    = THIS_MODULE,
        .new_smi  = ipmi_new_smi,
        .smi_gone = ipmi_smi_gone,
};

static int __init init_ipmi_devintf(void)
{
        int rv;

        if (ipmi_major < 0)
                return -EINVAL;

        printk(KERN_INFO "ipmi device interface\n");

        ipmi_class = class_create(THIS_MODULE, "ipmi");
        if (IS_ERR(ipmi_class)) {
                printk(KERN_ERR "ipmi: can't register device class\n");
                return PTR_ERR(ipmi_class);
        }

        rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
        if (rv < 0) {
                class_destroy(ipmi_class);
                printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
                return rv;
        }

        if (ipmi_major == 0) {
                ipmi_major = rv;
        }

        rv = ipmi_smi_watcher_register(&smi_watcher);
        if (rv) {
                unregister_chrdev(ipmi_major, DEVICE_NAME);
                class_destroy(ipmi_class);
                printk(KERN_WARNING "ipmi: can't register smi watcher\n");
                return rv;
        }

        return 0;
}
module_init(init_ipmi_devintf);

static void __exit cleanup_ipmi(void)
{
        struct ipmi_reg_list *entry, *entry2;

        mutex_lock(&reg_list_mutex);
        list_for_each_entry_safe(entry, entry2, &reg_list, link) {
                list_del(&entry->link);
                device_destroy(ipmi_class, entry->dev);
                kfree(entry);
        }
        mutex_unlock(&reg_list_mutex);
        class_destroy(ipmi_class);
        ipmi_smi_watcher_unregister(&smi_watcher);
        unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
MODULE_ALIAS("platform:ipmi_si");