/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>
#include <linux/smp_lock.h>

struct ipmi_file_private
{
	ipmi_user_t          user;
	spinlock_t           recv_msg_lock;
	struct list_head     recv_msgs;
	struct file          *file;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t    wait;
	struct mutex         recv_mutex;
	int                  default_retries;
	unsigned int         default_retry_time_ms;
};
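
/*
 * Called by the IPMI message handler when a message arrives for this
 * user: queue it on the per-file receive list and, if the list was
 * empty, wake anyone blocked in poll() and deliver SIGIO to fasync
 * subscribers.
 */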
static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void                 *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int                      was_empty;
	unsigned long            flags;

	spin_lock_irqsave(&(priv->recv_msg_lock), flags);

	was_empty = list_empty(&(priv->recv_msgs));
	list_add_tail(&(msg->link), &(priv->recv_msgs));

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}

	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}
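
/*
 * poll() support: report the device readable as soon as at least one
 * received message is queued on priv->recv_msgs.
 */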
static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	unsigned int             mask = 0;
	unsigned long            flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);

	if (!list_empty(&(priv->recv_msgs)))
		mask |= (POLLIN | POLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}
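
/*
 * fasync() support so userspace can request SIGIO delivery when a new
 * message is queued (see the kill_fasync() call in the receive handler).
 */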
static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      result;

	lock_kernel(); /* could race against open() otherwise */
	result = fasync_helper(fd, file, on, &priv->fasync_queue);
	unlock_kernel();

	return (result);
}

static struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};
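
/*
 * open() creates an IPMI messaging user for this file.  The minor
 * number selects the interface; the per-file state holds the receive
 * queue, its locks, and the default retry parameters.
 */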
static int ipmi_open(struct inode *inode, struct file *file)
{
	int                      if_num = iminor(inode);
	int                      rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	lock_kernel();
	priv->file = file;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &(priv->user));
	if (rv) {
		kfree(priv);
		goto out;
	}

	file->private_data = priv;

	spin_lock_init(&(priv->recv_msg_lock));
	INIT_LIST_HEAD(&(priv->recv_msgs));
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	mutex_init(&priv->recv_mutex);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

out:
	unlock_kernel();
	return rv;
}
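
/*
 * release() tears down the IPMI user created in ipmi_open() and frees
 * the per-file state.
 */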
static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      rv;

	rv = ipmi_destroy_user(priv->user);
	if (rv)
		return rv;

	/* FIXME - free the messages in the list. */
	kfree(priv);

	return 0;
}
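
/*
 * Common send path for IPMICTL_SEND_COMMAND and
 * IPMICTL_SEND_COMMAND_SETTIME (and their 32-bit compat variants):
 * copy the address and message data in from userspace, validate them,
 * and hand the request to ipmi_request_settime().
 */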
static int handle_send_req(ipmi_user_t     user,
			   struct ipmi_req *req,
			   int             retries,
			   unsigned int    retry_time_ms)
{
	int                    rv;
	struct ipmi_addr       addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/*
	 * From here out we cannot return; we must jump to "out" for
	 * error exits to free msg.data.
	 */
	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len))
		{
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
 out:
	kfree(msg.data);
	return rv;
}
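
/*
 * The main ioctl handler.  A typical userspace sequence is to open
 * /dev/ipmiN, issue IPMICTL_SEND_COMMAND, wait for readability with
 * poll(), then fetch the response with IPMICTL_RECEIVE_MSG[_TRUNC].
 * The remaining commands register for incoming commands and get/set
 * per-user addressing, LUN, timing, and maintenance-mode parameters.
 */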
static int ipmi_ioctl(struct inode  *inode,
		      struct file   *file,
		      unsigned int  cmd,
		      unsigned long data)
{
	int                      rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user *arg = (void __user *)data;

	switch (cmd)
	{
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req,
				     priv->default_retries,
				     priv->default_retry_time_ms);
		break;
	}

	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv     rsp;
		int                  addr_len;
		struct list_head     *entry;
		struct ipmi_recv_msg *msg;
		unsigned long        flags;

		rv = 0;
		if (copy_from_user(&rsp, arg, sizeof(rsp))) {
			rv = -EFAULT;
			break;
		}

		/*
		 * We claim a mutex because we don't want two
		 * users getting something from the queue at a time.
		 * Since we have to release the spinlock before we can
		 * copy the data to the user, it's possible another
		 * user will grab something from the queue, too.  Then
		 * the messages might get out of order if something
		 * fails and the message gets put back onto the
		 * queue.  This mutex prevents that problem.
		 */
		mutex_lock(&priv->recv_mutex);

		/* Grab the message off the list. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		if (list_empty(&(priv->recv_msgs))) {
			spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
			rv = -EAGAIN;
			goto recv_err;
		}
		entry = priv->recv_msgs.next;
		msg = list_entry(entry, struct ipmi_recv_msg, link);
		list_del(entry);
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

		addr_len = ipmi_addr_length(msg->addr.addr_type);
		if (rsp.addr_len < addr_len)
		{
			rv = -EINVAL;
			goto recv_putback_on_err;
		}

		if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp.addr_len = addr_len;

		rsp.recv_type = msg->recv_type;
		rsp.msgid = msg->msgid;
		rsp.msg.netfn = msg->msg.netfn;
		rsp.msg.cmd = msg->msg.cmd;

		if (msg->msg.data_len > 0) {
			if (rsp.msg.data_len < msg->msg.data_len) {
				rv = -EMSGSIZE;
				if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
					msg->msg.data_len = rsp.msg.data_len;
				} else {
					goto recv_putback_on_err;
				}
			}

			if (copy_to_user(rsp.msg.data,
					 msg->msg.data,
					 msg->msg.data_len))
			{
				rv = -EFAULT;
				goto recv_putback_on_err;
			}
			rsp.msg.data_len = msg->msg.data_len;
		} else {
			rsp.msg.data_len = 0;
		}

		if (copy_to_user(arg, &rsp, sizeof(rsp))) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}

		mutex_unlock(&priv->recv_mutex);
		ipmi_free_recv_msg(msg);
		break;

	recv_putback_on_err:
		/* If we got an error, put the message back onto
		   the head of the queue. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		list_add(entry, &(priv->recv_msgs));
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
		mutex_unlock(&priv->recv_mutex);
		break;

	recv_err:
		mutex_unlock(&priv->recv_mutex);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   val.chans);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     val.chans);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}

	/* The next four are legacy, not per-channel. */
	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_address(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		return ipmi_set_my_address(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}

	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		mode = ipmi_get_maintenance_mode(priv->user);
		if (copy_to_user(arg, &mode, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		if (copy_from_user(&mode, arg, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = ipmi_set_maintenance_mode(priv->user, mode);
		break;
	}
	}

	return rv;
}

#ifdef CONFIG_COMPAT

/*
 * The following code supports 32-bit compatible ioctls on 64-bit
 * kernels, allowing 32-bit applications to run on a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
	u8            netfn;
	u8            cmd;
	u16           data_len;
	compat_uptr_t data;
};

struct compat_ipmi_req {
	compat_uptr_t          addr;
	compat_uint_t          addr_len;
	compat_long_t          msgid;
	struct compat_ipmi_msg msg;
};

struct compat_ipmi_recv {
	compat_int_t           recv_type;
	compat_uptr_t          addr;
	compat_uint_t          addr_len;
	compat_long_t          msgid;
	struct compat_ipmi_msg msg;
};

struct compat_ipmi_req_settime {
	struct compat_ipmi_req req;
	compat_int_t           retries;
	compat_uint_t          retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static long get_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
	    __get_user(p64->netfn, &p32->netfn) ||
	    __get_user(p64->cmd, &p32->cmd) ||
	    __get_user(p64->data_len, &p32->data_len) ||
	    __get_user(tmp, &p32->data))
		return -EFAULT;
	p64->data = compat_ptr(tmp);
	return 0;
}

static long put_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg __user *p32)
{
	if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
	    __put_user(p64->netfn, &p32->netfn) ||
	    __put_user(p64->cmd, &p32->cmd) ||
	    __put_user(p64->data_len, &p32->data_len))
		return -EFAULT;
	return 0;
}

static long get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
	    __get_user(tmp, &p32->addr) ||
	    __get_user(p64->addr_len, &p32->addr_len) ||
	    __get_user(p64->msgid, &p32->msgid) ||
	    get_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	p64->addr = compat_ptr(tmp);
	return 0;
}

static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
					struct compat_ipmi_req_settime __user *p32)
{
	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
	    get_compat_ipmi_req(&p64->req, &p32->req) ||
	    __get_user(p64->retries, &p32->retries) ||
	    __get_user(p64->retry_time_ms, &p32->retry_time_ms))
		return -EFAULT;
	return 0;
}

static long get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
	    __get_user(p64->recv_type, &p32->recv_type) ||
	    __get_user(tmp, &p32->addr) ||
	    __get_user(p64->addr_len, &p32->addr_len) ||
	    __get_user(p64->msgid, &p32->msgid) ||
	    get_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	p64->addr = compat_ptr(tmp);
	return 0;
}

static long put_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv __user *p32)
{
	if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
	    __put_user(p64->recv_type, &p32->recv_type) ||
	    __put_user(p64->addr_len, &p32->addr_len) ||
	    __put_user(p64->msgid, &p32->msgid) ||
	    put_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	return 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	int rc;
	struct ipmi_file_private *priv = filep->private_data;

	switch(cmd) {
	case COMPAT_IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req rp;

		if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
			return -EFAULT;

		return handle_send_req(priv->user, &rp,
				       priv->default_retries,
				       priv->default_retry_time_ms);
	}
	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime sp;

		if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
			return -EFAULT;

		return handle_send_req(priv->user, &sp.req,
				       sp.retries, sp.retry_time_ms);
	}
	case COMPAT_IPMICTL_RECEIVE_MSG:
	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv __user *precv64;
		struct ipmi_recv recv64;

		if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
			return -EFAULT;

		precv64 = compat_alloc_user_space(sizeof(recv64));
		if (copy_to_user(precv64, &recv64, sizeof(recv64)))
			return -EFAULT;

		rc = ipmi_ioctl(filep->f_path.dentry->d_inode, filep,
				((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
				 ? IPMICTL_RECEIVE_MSG
				 : IPMICTL_RECEIVE_MSG_TRUNC),
				(unsigned long) precv64);
		if (rc != 0)
			return rc;

		if (copy_from_user(&recv64, precv64, sizeof(recv64)))
			return -EFAULT;

		if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
			return -EFAULT;

		return rc;
	}
	default:
		return ipmi_ioctl(filep->f_path.dentry->d_inode, filep, cmd, arg);
	}
}
#endif

static const struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= ipmi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_ipmi_ioctl,
#endif
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
};

#define DEVICE_NAME "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
		 " default, or if you set it to zero, it will choose the next"
		 " available major number.  Setting it to -1 will disable the"
		 " interface.  Other values will set the major device number"
		 " to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
	dev_t            dev;
	struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;
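
/*
 * Called by the SMI watcher when a new IPMI interface appears: create
 * the ipmi%d device node and remember its dev_t so it can be removed
 * again when the interface goes away or the module is unloaded.
 */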
static void ipmi_new_smi(int if_num, struct device *device)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		printk(KERN_ERR "ipmi_devintf: Unable to create the"
		       " ipmi class device link\n");
		return;
	}
	entry->dev = dev;

	mutex_lock(&reg_list_mutex);
	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
	list_add(&entry->link, &reg_list);
	mutex_unlock(&reg_list_mutex);
}
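
/*
 * Called by the SMI watcher when an interface is removed: drop its
 * entry from reg_list and destroy the corresponding device node.
 */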
static void ipmi_smi_gone(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry(entry, &reg_list, link) {
		if (entry->dev == dev) {
			list_del(&entry->link);
			kfree(entry);
			break;
		}
	}
	device_destroy(ipmi_class, dev);
	mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};
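
/*
 * Module init: create the "ipmi" class, register the character device
 * (honoring the ipmi_major parameter), and register the SMI watcher so
 * device nodes are created for existing and future interfaces.
 */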
static __init int init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	printk(KERN_INFO "ipmi device interface\n");

	ipmi_class = class_create(THIS_MODULE, "ipmi");
	if (IS_ERR(ipmi_class)) {
		printk(KERN_ERR "ipmi: can't register device class\n");
		return PTR_ERR(ipmi_class);
	}

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_destroy(ipmi_class);
		printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0) {
		ipmi_major = rv;
	}

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_destroy(ipmi_class);
		printk(KERN_WARNING "ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);
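
/*
 * Module exit: remove any remaining device nodes, then unwind the
 * class, SMI watcher, and character device registration.
 */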
static __exit void cleanup_ipmi(void)
{
	struct ipmi_reg_list *entry, *entry2;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
		list_del(&entry->link);
		device_destroy(ipmi_class, entry->dev);
		kfree(entry);
	}
	mutex_unlock(&reg_list_mutex);
	class_destroy(ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
MODULE_ALIAS("platform:ipmi_si");