// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */
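/*
 * User space reaches this driver through the /dev/ipmiN character
 * devices created below.  A typical sequence, sketched here only for
 * orientation (see include/uapi/linux/ipmi.h for the authoritative
 * structures), is roughly:
 *
 *     fd = open("/dev/ipmi0", O_RDWR);
 *     ioctl(fd, IPMICTL_SEND_COMMAND, &req);        send a message
 *     poll(...);                                     wait for the reply
 *     ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv);   read it back
 */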
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>
struct ipmi_file_private
{
	struct ipmi_user     *user;
	spinlock_t           recv_msg_lock;
	struct list_head     recv_msgs;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t    wait;
	struct mutex         recv_mutex;
	int                  default_retries;
	unsigned int         default_retry_time_ms;
};
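/*
 * Receive callback registered with the IPMI message handler: queue the
 * incoming message on the per-file list and wake up any poll/select or
 * SIGIO waiters.
 */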
static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void                 *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int                      was_empty;
	unsigned long            flags;

	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	was_empty = list_empty(&priv->recv_msgs);
	list_add_tail(&msg->link, &priv->recv_msgs);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}
}
static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	__poll_t                 mask = 0;
	unsigned long            flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);

	if (!list_empty(&priv->recv_msgs))
		mask |= (EPOLLIN | EPOLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}
static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;

	return fasync_helper(fd, file, on, &priv->fasync_queue);
}
static const struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};
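/*
 * Open a per-interface device node: allocate the per-file state and
 * register an IPMI user with the message handler for this minor number.
 */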
static int ipmi_open(struct inode *inode, struct file *file)
{
	int                      if_num = iminor(inode);
	int                      rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &priv->user);
	if (rv) {
		kfree(priv);
		goto out;
	}

	file->private_data = priv;

	spin_lock_init(&priv->recv_msg_lock);
	INIT_LIST_HEAD(&priv->recv_msgs);
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	mutex_init(&priv->recv_mutex);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

out:
	return rv;
}
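/* Tear down the IPMI user and free any messages still queued. */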
static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      rv;
	struct ipmi_recv_msg     *msg, *next;

	rv = ipmi_destroy_user(priv->user);
	if (rv)
		return rv;

	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
		ipmi_free_recv_msg(msg);

	kfree(priv);

	return 0;
}
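/*
 * Copy a request from user space, validate the address, and hand the
 * message to the IPMI message handler with the given retry settings.
 */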
static int handle_send_req(struct ipmi_user *user,
			   struct ipmi_req  *req,
			   int              retries,
			   unsigned int     retry_time_ms)
{
	int                    rv;
	struct ipmi_addr       addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/* From here out we cannot return, we must jump to "out" for
	   error exits to free msg.data. */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len)) {
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
out:
	kfree(msg.data);
	return rv;
}
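/*
 * Pull the oldest message off the per-file receive queue and copy it to
 * user space through the supplied copyout helper; on failure the message
 * is put back at the head of the queue.
 */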
static int handle_recv(struct ipmi_file_private *priv,
		       bool trunc, struct ipmi_recv *rsp,
		       int (*copyout)(struct ipmi_recv *, void __user *),
		       void __user *to)
{
	int                  addr_len;
	struct list_head     *entry;
	struct ipmi_recv_msg *msg;
	unsigned long        flags;
	int rv = 0, rv2 = 0;

	/* We claim a mutex because we don't want two
	   users getting something from the queue at a time.
	   Since we have to release the spinlock before we can
	   copy the data to the user, it's possible another
	   user will grab something from the queue, too.  Then
	   the messages might get out of order if something
	   fails and the message gets put back onto the
	   queue.  This mutex prevents that problem. */
	mutex_lock(&priv->recv_mutex);

	/* Grab the message off the list. */
	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	if (list_empty(&(priv->recv_msgs))) {
		spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
		rv = -EAGAIN;
		goto recv_err;
	}
	entry = priv->recv_msgs.next;
	msg = list_entry(entry, struct ipmi_recv_msg, link);
	list_del(entry);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	addr_len = ipmi_addr_length(msg->addr.addr_type);
	if (rsp->addr_len < addr_len) {
		rv = -EINVAL;
		goto recv_putback_on_err;
	}

	if (copy_to_user(rsp->addr, &msg->addr, addr_len)) {
		rv = -EFAULT;
		goto recv_putback_on_err;
	}
	rsp->addr_len = addr_len;

	rsp->recv_type = msg->recv_type;
	rsp->msgid = msg->msgid;
	rsp->msg.netfn = msg->msg.netfn;
	rsp->msg.cmd = msg->msg.cmd;

	if (msg->msg.data_len > 0) {
		if (rsp->msg.data_len < msg->msg.data_len) {
			rv2 = -EMSGSIZE;
			if (trunc)
				msg->msg.data_len = rsp->msg.data_len;
			else
				goto recv_putback_on_err;
		}

		if (copy_to_user(rsp->msg.data,
				 msg->msg.data,
				 msg->msg.data_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp->msg.data_len = msg->msg.data_len;
	} else {
		rsp->msg.data_len = 0;
	}

	rv = copyout(rsp, to);
	if (rv)
		goto recv_putback_on_err;

	mutex_unlock(&priv->recv_mutex);
	ipmi_free_recv_msg(msg);
	return rv2;

recv_putback_on_err:
	/* If we got an error, put the message back onto
	   the head of the queue. */
	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	list_add(entry, &priv->recv_msgs);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

recv_err:
	mutex_unlock(&priv->recv_mutex);
	return rv;
}
static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
{
	return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
}
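/*
 * Main ioctl dispatcher for the character device; each IPMICTL_* command
 * is handled in its own case block below.
 */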
static long ipmi_ioctl(struct file   *file,
		       unsigned int  cmd,
		       unsigned long data)
{
	int                      rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user *arg = (void __user *)data;

	switch (cmd)
	{
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;
		int retries;
		unsigned int retry_time_ms;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		mutex_lock(&priv->recv_mutex);
		retries = priv->default_retries;
		retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
		break;
	}
	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv rsp;

		if (copy_from_user(&rsp, arg, sizeof(rsp)))
			rv = -EFAULT;
		else
			rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
					 &rsp, copyout_recv, arg);
		break;
	}
	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   val.chans);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     val.chans);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}
	/* The next four are legacy, not per-channel. */
	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_address(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}
	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		return ipmi_set_my_address(priv->user, val.channel, val.value);
	}

	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}
	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		mutex_lock(&priv->recv_mutex);
		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		mutex_unlock(&priv->recv_mutex);
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		mutex_lock(&priv->recv_mutex);
		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}

	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		mode = ipmi_get_maintenance_mode(priv->user);
		if (copy_to_user(arg, &mode, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		if (copy_from_user(&mode, arg, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = ipmi_set_maintenance_mode(priv->user, mode);
		break;
	}

	default:
		rv = -ENOTTY;
		break;
	}

	return rv;
}
#ifdef CONFIG_COMPAT
/*
 * The following code supports 32-bit compatible ioctls on 64-bit
 * kernels, so that 32-bit applications can run on a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
struct compat_ipmi_msg {
	u8		netfn;
	u8		cmd;
	u16		data_len;
	compat_uptr_t	data;
};

struct compat_ipmi_req {
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_recv {
	compat_int_t		recv_type;
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_req_settime {
	struct compat_ipmi_req	req;
	compat_int_t		retries;
	compat_uint_t		retry_time_ms;
};
/*
 * Define some helper functions for copying IPMI data
 */
static void get_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg *p32)
{
	p64->netfn = p32->netfn;
	p64->cmd = p32->cmd;
	p64->data_len = p32->data_len;
	p64->data = compat_ptr(p32->data);
}

static void get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req *p32)
{
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
					struct compat_ipmi_req_settime *p32)
{
	get_compat_ipmi_req(&p64->req, &p32->req);
	p64->retries = p32->retries;
	p64->retry_time_ms = p32->retry_time_ms;
}

static void get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv *p32)
{
	memset(p64, 0, sizeof(struct ipmi_recv));
	p64->recv_type = p32->recv_type;
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
{
	struct compat_ipmi_recv v32;

	memset(&v32, 0, sizeof(struct compat_ipmi_recv));
	v32.recv_type = p64->recv_type;
	v32.addr = ptr_to_compat(p64->addr);
	v32.addr_len = p64->addr_len;
	v32.msgid = p64->msgid;
	v32.msg.netfn = p64->msg.netfn;
	v32.msg.cmd = p64->msg.cmd;
	v32.msg.data_len = p64->msg.data_len;
	v32.msg.data = ptr_to_compat(p64->msg.data);

	return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
}
/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	struct ipmi_file_private *priv = filep->private_data;

	switch(cmd) {
	case COMPAT_IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req	rp;
		struct compat_ipmi_req r32;
		int retries;
		unsigned int retry_time_ms;

		if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
			return -EFAULT;

		get_compat_ipmi_req(&rp, &r32);

		mutex_lock(&priv->recv_mutex);
		retries = priv->default_retries;
		retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		return handle_send_req(priv->user, &rp,
				       retries, retry_time_ms);
	}
	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime	sp;
		struct compat_ipmi_req_settime sp32;

		if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
			return -EFAULT;

		get_compat_ipmi_req_settime(&sp, &sp32);

		return handle_send_req(priv->user, &sp.req,
				       sp.retries, sp.retry_time_ms);
	}
	case COMPAT_IPMICTL_RECEIVE_MSG:
	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv        recv64;
		struct compat_ipmi_recv recv32;

		if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
			return -EFAULT;

		get_compat_ipmi_recv(&recv64, &recv32);

		return handle_recv(priv,
				   cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
				   &recv64, copyout_recv32, compat_ptr(arg));
	}
	default:
		return ipmi_ioctl(filep, cmd, arg);
	}
}
#endif
static const struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ipmi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = compat_ipmi_ioctl,
#endif
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
	.llseek		= noop_llseek,
};
#define DEVICE_NAME     "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
		 " default, or if you set it to zero, it will choose the next"
		 " available device. Setting it to -1 will disable the"
		 " interface. Other values will set the major device number"
		 " to that value.");
/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
	dev_t            dev;
	struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;
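/*
 * SMI watcher callbacks: create a /dev/ipmiN device node when a new
 * interface registers with the message handler, and remove it again
 * when the interface goes away.
 */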
static void ipmi_new_smi(int if_num, struct device *device)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		pr_err("ipmi_devintf: Unable to create the ipmi class device link\n");
		return;
	}
	entry->dev = dev;

	mutex_lock(&reg_list_mutex);
	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
	list_add(&entry->link, &reg_list);
	mutex_unlock(&reg_list_mutex);
}
static void ipmi_smi_gone(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry(entry, &reg_list, link) {
		if (entry->dev == dev) {
			list_del(&entry->link);
			kfree(entry);
			break;
		}
	}
	device_destroy(ipmi_class, dev);
	mutex_unlock(&reg_list_mutex);
}
static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};
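/*
 * Module init: create the "ipmi" class, register the character device
 * major, and hook into the SMI watcher so device nodes appear and
 * disappear as interfaces come and go.
 */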
static int __init init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	pr_info("ipmi device interface\n");

	ipmi_class = class_create(THIS_MODULE, "ipmi");
	if (IS_ERR(ipmi_class)) {
		pr_err("ipmi: can't register device class\n");
		return PTR_ERR(ipmi_class);
	}

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_destroy(ipmi_class);
		pr_err("ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0) {
		ipmi_major = rv;
	}

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_destroy(ipmi_class);
		pr_warn("ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);

static void __exit cleanup_ipmi(void)
{
	struct ipmi_reg_list *entry, *entry2;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
		list_del(&entry->link);
		device_destroy(ipmi_class, entry->dev);
		kfree(entry);
	}
	mutex_unlock(&reg_list_mutex);
	class_destroy(ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");