/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

struct ipmi_file_private
{
	ipmi_user_t          user;
	spinlock_t           recv_msg_lock;
	struct list_head     recv_msgs;
	struct file          *file;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t    wait;
	struct mutex         recv_mutex;
	int                  default_retries;
	unsigned int         default_retry_time_ms;
};

static DEFINE_MUTEX(ipmi_mutex);

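/*
 * Receive callback registered with the IPMI core: queue the incoming
 * message on the per-file list and wake up poll() and SIGIO waiters
 * when the queue goes from empty to non-empty.
 */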
static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void                 *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int                      was_empty;
	unsigned long            flags;

	spin_lock_irqsave(&(priv->recv_msg_lock), flags);

	was_empty = list_empty(&(priv->recv_msgs));
	list_add_tail(&(msg->link), &(priv->recv_msgs));

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}

	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}

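/* Report the device readable whenever the receive queue is non-empty. */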
static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	unsigned int             mask = 0;
	unsigned long            flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);

	if (!list_empty(&(priv->recv_msgs)))
		mask |= (POLLIN | POLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}

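/* Enable or disable SIGIO (asynchronous) notification for this file. */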
static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      result;

	mutex_lock(&ipmi_mutex); /* could race against open() otherwise */
	result = fasync_helper(fd, file, on, &priv->fasync_queue);
	mutex_unlock(&ipmi_mutex);

	return (result);
}

static struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};

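/*
 * Allocate the per-file state and create an IPMI user on the interface
 * selected by the device minor number.
 */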
static int ipmi_open(struct inode *inode, struct file *file)
{
	int                      if_num = iminor(inode);
	int                      rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	mutex_lock(&ipmi_mutex);
	priv->file = file;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &(priv->user));
	if (rv) {
		kfree(priv);
		goto out;
	}

	file->private_data = priv;

	spin_lock_init(&(priv->recv_msg_lock));
	INIT_LIST_HEAD(&(priv->recv_msgs));
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	mutex_init(&priv->recv_mutex);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

out:
	mutex_unlock(&ipmi_mutex);
	return rv;
}

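/*
 * Destroy the IPMI user, free any messages still queued for delivery,
 * and release the per-file state.
 */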
static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      rv;
	struct ipmi_recv_msg *msg, *next;

	rv = ipmi_destroy_user(priv->user);
	if (rv)
		return rv;

	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
		ipmi_free_recv_msg(msg);

	kfree(priv);

	return 0;
}

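/*
 * Copy a send request from user space, validate the destination address,
 * and submit the message to the IPMI core with the given retry settings.
 */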
static int handle_send_req(ipmi_user_t     user,
			   struct ipmi_req *req,
			   int             retries,
			   unsigned int    retry_time_ms)
{
	int              rv;
	struct ipmi_addr addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/* From here out we cannot return, we must jump to "out" for
	   error exits to free msg.data. */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len))
		{
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
 out:
	kfree(msg.data);
	return rv;
}

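/*
 * Main ioctl dispatcher; "data" is a pointer into user space.  Callers
 * serialize through ipmi_mutex (see ipmi_unlocked_ioctl below).
 */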
static int ipmi_ioctl(struct file   *file,
		      unsigned int  cmd,
		      unsigned long data)
{
	int                      rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user *arg = (void __user *)data;

	switch (cmd)
	{
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req,
				     priv->default_retries,
				     priv->default_retry_time_ms);
		break;
	}

	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv      rsp;
		int                   addr_len;
		struct list_head      *entry;
		struct ipmi_recv_msg  *msg;
		unsigned long         flags;

		rv = 0;
		if (copy_from_user(&rsp, arg, sizeof(rsp))) {
			rv = -EFAULT;
			break;
		}

		/* We claim a mutex because we don't want two
		   users getting something from the queue at a time.
		   Since we have to release the spinlock before we can
		   copy the data to the user, it's possible another
		   user will grab something from the queue, too.  Then
		   the messages might get out of order if something
		   fails and the message gets put back onto the
		   queue.  This mutex prevents that problem. */
		mutex_lock(&priv->recv_mutex);

		/* Grab the message off the list. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		if (list_empty(&(priv->recv_msgs))) {
			spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
			rv = -EAGAIN;
			goto recv_err;
		}
		entry = priv->recv_msgs.next;
		msg = list_entry(entry, struct ipmi_recv_msg, link);
		list_del(entry);
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

		addr_len = ipmi_addr_length(msg->addr.addr_type);
		if (rsp.addr_len < addr_len)
		{
			rv = -EINVAL;
			goto recv_putback_on_err;
		}

		if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp.addr_len = addr_len;

		rsp.recv_type = msg->recv_type;
		rsp.msgid = msg->msgid;
		rsp.msg.netfn = msg->msg.netfn;
		rsp.msg.cmd = msg->msg.cmd;

		if (msg->msg.data_len > 0) {
			if (rsp.msg.data_len < msg->msg.data_len) {
				rv = -EMSGSIZE;
				if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
					msg->msg.data_len = rsp.msg.data_len;
				} else {
					goto recv_putback_on_err;
				}
			}

			if (copy_to_user(rsp.msg.data,
					 msg->msg.data,
					 msg->msg.data_len))
			{
				rv = -EFAULT;
				goto recv_putback_on_err;
			}
			rsp.msg.data_len = msg->msg.data_len;
		} else {
			rsp.msg.data_len = 0;
		}

		if (copy_to_user(arg, &rsp, sizeof(rsp))) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}

		mutex_unlock(&priv->recv_mutex);
		ipmi_free_recv_msg(msg);
		break;

	recv_putback_on_err:
		/* If we got an error, put the message back onto
		   the head of the queue. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		list_add(entry, &(priv->recv_msgs));
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
		mutex_unlock(&priv->recv_mutex);
		break;

	recv_err:
		mutex_unlock(&priv->recv_mutex);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   val.chans);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     val.chans);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}

	/* The next four are legacy, not per-channel. */
	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_address(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}

	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		mode = ipmi_get_maintenance_mode(priv->user);
		if (copy_to_user(arg, &mode, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		if (copy_from_user(&mode, arg, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = ipmi_set_maintenance_mode(priv->user, mode);
		break;
	}
	}

	return rv;
}

/*
 * Note: it doesn't make sense to take the BKL here but
 *       not in compat_ipmi_ioctl. -arnd
 */
static long ipmi_unlocked_ioctl(struct file   *file,
				unsigned int  cmd,
				unsigned long data)
{
	int ret;

	mutex_lock(&ipmi_mutex);
	ret = ipmi_ioctl(file, cmd, data);
	mutex_unlock(&ipmi_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

/*
 * The following code supports 32-bit compatible ioctls on 64-bit
 * kernels, which allows running 32-bit apps on a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
	u8		netfn;
	u8		cmd;
	u16		data_len;
	compat_uptr_t	data;
};

struct compat_ipmi_req {
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_recv {
	compat_int_t		recv_type;
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_req_settime {
	struct compat_ipmi_req	req;
	compat_int_t		retries;
	compat_uint_t		retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static long get_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
			__get_user(p64->netfn, &p32->netfn) ||
			__get_user(p64->cmd, &p32->cmd) ||
			__get_user(p64->data_len, &p32->data_len) ||
			__get_user(tmp, &p32->data))
		return -EFAULT;
	p64->data = compat_ptr(tmp);
	return 0;
}

static long put_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg __user *p32)
{
	if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
			__put_user(p64->netfn, &p32->netfn) ||
			__put_user(p64->cmd, &p32->cmd) ||
			__put_user(p64->data_len, &p32->data_len))
		return -EFAULT;
	return 0;
}

static long get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
			__get_user(tmp, &p32->addr) ||
			__get_user(p64->addr_len, &p32->addr_len) ||
			__get_user(p64->msgid, &p32->msgid) ||
			get_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	p64->addr = compat_ptr(tmp);
	return 0;
}

static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
		struct compat_ipmi_req_settime __user *p32)
{
	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
			get_compat_ipmi_req(&p64->req, &p32->req) ||
			__get_user(p64->retries, &p32->retries) ||
			__get_user(p64->retry_time_ms, &p32->retry_time_ms))
		return -EFAULT;
	return 0;
}

static long get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
			__get_user(p64->recv_type, &p32->recv_type) ||
			__get_user(tmp, &p32->addr) ||
			__get_user(p64->addr_len, &p32->addr_len) ||
			__get_user(p64->msgid, &p32->msgid) ||
			get_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	p64->addr = compat_ptr(tmp);
	return 0;
}

static long put_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv __user *p32)
{
	if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
			__put_user(p64->recv_type, &p32->recv_type) ||
			__put_user(p64->addr_len, &p32->addr_len) ||
			__put_user(p64->msgid, &p32->msgid) ||
			put_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	return 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	int rc;
	struct ipmi_file_private *priv = filep->private_data;

	switch(cmd) {
	case COMPAT_IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req	rp;

		if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
			return -EFAULT;

		return handle_send_req(priv->user, &rp,
				priv->default_retries,
				priv->default_retry_time_ms);
	}
	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime	sp;

		if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
			return -EFAULT;

		return handle_send_req(priv->user, &sp.req,
				sp.retries, sp.retry_time_ms);
	}
	case COMPAT_IPMICTL_RECEIVE_MSG:
	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv __user *precv64;
		struct ipmi_recv recv64;

		memset(&recv64, 0, sizeof(recv64));
		if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
			return -EFAULT;

		precv64 = compat_alloc_user_space(sizeof(recv64));
		if (copy_to_user(precv64, &recv64, sizeof(recv64)))
			return -EFAULT;

		rc = ipmi_ioctl(filep,
				((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
				 ? IPMICTL_RECEIVE_MSG
				 : IPMICTL_RECEIVE_MSG_TRUNC),
				(unsigned long) precv64);
		if (rc != 0)
			return rc;

		if (copy_from_user(&recv64, precv64, sizeof(recv64)))
			return -EFAULT;

		if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
			return -EFAULT;

		return rc;
	}
	default:
		return ipmi_ioctl(filep, cmd, arg);
	}
}

static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
				       unsigned long arg)
{
	int ret;

	mutex_lock(&ipmi_mutex);
	ret = compat_ipmi_ioctl(filep, cmd, arg);
	mutex_unlock(&ipmi_mutex);

	return ret;
}
#endif

static const struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = unlocked_compat_ipmi_ioctl,
#endif
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
	.llseek		= noop_llseek,
};

#define DEVICE_NAME     "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
		 " default, or if you set it to zero, it will choose the next"
		 " available device.  Setting it to -1 will disable the"
		 " interface.  Other values will set the major device number"
		 " to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
	dev_t            dev;
	struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

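/*
 * SMI watcher callback: a new system interface has appeared, so create
 * its ipmi%d class device and remember it for later removal.
 */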
static void ipmi_new_smi(int if_num, struct device *device)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		printk(KERN_ERR "ipmi_devintf: Unable to create the"
		       " ipmi class device link\n");
		return;
	}
	entry->dev = dev;

	mutex_lock(&reg_list_mutex);
	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
	list_add(&entry->link, &reg_list);
	mutex_unlock(&reg_list_mutex);
}

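/*
 * SMI watcher callback: a system interface has gone away, so drop its
 * bookkeeping entry and destroy the corresponding class device.
 */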
static void ipmi_smi_gone(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry(entry, &reg_list, link) {
		if (entry->dev == dev) {
			list_del(&entry->link);
			kfree(entry);
			break;
		}
	}
	device_destroy(ipmi_class, dev);
	mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};

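/*
 * Module init: register the "ipmi" class, the character device (with a
 * dynamic major unless ipmi_major overrides it), and the SMI watcher
 * that tracks interface arrival and removal.
 */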
static int __init init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	printk(KERN_INFO "ipmi device interface\n");

	ipmi_class = class_create(THIS_MODULE, "ipmi");
	if (IS_ERR(ipmi_class)) {
		printk(KERN_ERR "ipmi: can't register device class\n");
		return PTR_ERR(ipmi_class);
	}

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_destroy(ipmi_class);
		printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0) {
		ipmi_major = rv;
	}

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_destroy(ipmi_class);
		printk(KERN_WARNING "ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);

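/*
 * Module exit: destroy every registered class device, then drop the
 * class, the SMI watcher, and the character device registration.
 */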
static void __exit cleanup_ipmi(void)
{
	struct ipmi_reg_list *entry, *entry2;
	mutex_lock(&reg_list_mutex);
	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
		list_del(&entry->link);
		device_destroy(ipmi_class, entry->dev);
		kfree(entry);
	}
	mutex_unlock(&reg_list_mutex);
	class_destroy(ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
MODULE_ALIAS("platform:ipmi_si");