Linux 2.6.20.7
[linux/fpc-iii.git] / drivers / char / ipmi / ipmi_msghandler.c
blob53582b53da95c449a9cb730f1b93734793139c30
1 /*
2 * ipmi_msghandler.c
4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/sched.h>
38 #include <linux/poll.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
#define PFX "IPMI message handler: "

#define IPMI_DRIVER_VERSION "39.1"

/* Forward declarations for helpers defined later in this file. */
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);

/* Set once ipmi_init_msghandler() has completed successfully. */
static int initialized;

#ifdef CONFIG_PROC_FS
/* Root of the /proc/ipmi directory tree. */
static struct proc_dir_entry *proc_ipmi_root;
#endif /* CONFIG_PROC_FS */

/* Remain in auto-maintenance mode for this amount of time (in ms). */
#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000

/* Maximum number of events allowed to sit in the waiting_events queue. */
#define MAX_EVENTS_IN_QUEUE	25

/* Don't let a message sit in a queue forever, always time it with at lest
   the max message timer.  This is in milliseconds. */
#define MAX_MSG_TIMEOUT		60000
/*
 * The main "user" data structure.  One of these exists per upper-layer
 * client created through ipmi_create_user().
 */
struct ipmi_user
{
	struct list_head link;

	/* Set to "0" when the user is destroyed. */
	int valid;

	/* Freed via free_user() when this refcount drops to zero. */
	struct kref refcount;

	/* The upper layer that handles receive messages. */
	struct ipmi_user_hndl *handler;
	void                  *handler_data;

	/* The interface this user is bound to. */
	ipmi_smi_t intf;

	/* Does this interface receive IPMI events? */
	int gets_events;
};
/* One registered (netfn, cmd, channel-mask) incoming-command receiver. */
struct cmd_rcvr
{
	struct list_head link;

	ipmi_user_t   user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;	/* Bitmask of channels this receiver claims. */

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};
/* One outstanding IPMB request waiting for its response. */
struct seq_table
{
	unsigned int inuse : 1;		/* Slot currently holds a message. */
	unsigned int broadcast : 1;	/* Message was a broadcast. */

	unsigned long timeout;		/* Remaining time (ms). */
	unsigned long orig_timeout;	/* Timeout to restore on (re)send. */
	unsigned int  retries_left;

	/* To verify on an incoming send message response that this is
	   the message that the response is for, we keep a sequence id
	   and increment it every time we send a message. */
	long seqid;

	/* This is held so we can properly respond to the message on a
	   timeout, and it is used to hold the temporary data for
	   retransmission, too. */
	struct ipmi_recv_msg *recv_msg;
};
/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 *
 * Layout: a 6-bit sequence-table index (IPMI_IPMB_NUM_SEQ == 64)
 * above a 26-bit sequence id.  All three macros must agree on the
 * masks so a stored msgid round-trips exactly.  The previous code
 * masked the seq with 0xff on store but 0x3f on retrieval, and
 * stored 26 bits of seqid while retrieving/incrementing only 22, so
 * any seqid with bits 22-25 set could never match its sequence-table
 * entry again.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3ffffff);			\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
/* Per-channel addressing configuration. */
struct ipmi_channel
{
	unsigned char medium;
	unsigned char protocol;

	/* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	   but may be changed by the user. */
	unsigned char address;

	/* My LUN.  This should generally stay the SMS LUN, but just in
	   case... */
	unsigned char lun;
};
#ifdef CONFIG_PROC_FS
/* Singly-linked record of one proc file created for an interface, kept
   so the entries can be torn down when the interface goes away. */
struct ipmi_proc_entry
{
	char                   *name;
	struct ipmi_proc_entry *next;
};
#endif
/* Driver-model representation of the BMC behind one or more interfaces. */
struct bmc_device
{
	struct platform_device *dev;
	struct ipmi_device_id  id;
	unsigned char          guid[16];
	int                    guid_set;	/* Nonzero once guid[] is valid. */

	struct kref            refcount;

	/* bmc device attributes */
	struct device_attribute device_id_attr;
	struct device_attribute provides_dev_sdrs_attr;
	struct device_attribute revision_attr;
	struct device_attribute firmware_rev_attr;
	struct device_attribute version_attr;
	struct device_attribute add_dev_support_attr;
	struct device_attribute manufacturer_id_attr;
	struct device_attribute product_id_attr;
	struct device_attribute guid_attr;
	struct device_attribute aux_firmware_rev_attr;
};
#define IPMI_IPMB_NUM_SEQ	64
#define IPMI_MAX_CHANNELS       16

/* Per-interface state: one of these exists per registered SMI. */
struct ipmi_smi
{
	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Used for a list of interfaces. */
	struct list_head link;

	/* The list of upper layers that are using me.  seq_lock
	 * protects this. */
	struct list_head users;

	/* Information to supply to users. */
	unsigned char ipmi_version_major;
	unsigned char ipmi_version_minor;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	struct bmc_device *bmc;
	char *my_dev_name;
	char *sysfs_name;

	/* This is the lower-layer's sender routine.  Note that you
	 * must either be holding the ipmi_interfaces_mutex or be in
	 * an umpreemptible region to use this.  You must fetch the
	 * value into a local variable and make sure it is not NULL. */
	struct ipmi_smi_handlers *handlers;
	void                     *send_info;

#ifdef CONFIG_PROC_FS
	/* A list of proc entries for this interface.  This does not
	   need a lock, only one thread creates it and only one thread
	   destroys it. */
	spinlock_t             proc_entry_lock;
	struct ipmi_proc_entry *proc_entries;
#endif

	/* Driver-model device for the system interface. */
	struct device          *si_dev;

	/* A table of sequence numbers for this interface.  We use the
	   sequence numbers for IPMB messages that go out of the
	   interface to match them up with their responses.  A routine
	   is called periodically to time the items in this list. */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/* Messages that were delayed for some reason (out of memory,
	   for instance), will go in here to be processed later in a
	   periodic timer interrupt. */
	spinlock_t       waiting_msgs_lock;
	struct list_head waiting_msgs;

	/* The list of command receivers that are registered for commands
	   on this interface. */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/* Events that were queues because no one was there to receive
	   them. */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	int              delivering_events;

	/* The event receiver for my BMC, only really used at panic
	   shutdown as a place to store this. */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	int maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/* A cheap hack, if this is non-null and a message to an
	   interface comes in with a NULL user, call this routine with
	   it.  Note that the message will still be freed by the
	   caller.  This only works on the system interface. */
	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);

	/* When we are scanning the channels for an SMI, this will
	   tell which channel we are scanning. */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel channels[IPMI_MAX_CHANNELS];

	/* Proc FS stuff. */
	struct proc_dir_entry *proc_dir;
	char                  proc_dir_name[10];

	spinlock_t   counter_lock; /* For making counters atomic. */

	/* Commands we got that were invalid. */
	unsigned int sent_invalid_commands;

	/* Commands we sent to the MC. */
	unsigned int sent_local_commands;
	/* Responses from the MC that were delivered to a user. */
	unsigned int handled_local_responses;
	/* Responses from the MC that were not delivered to a user. */
	unsigned int unhandled_local_responses;

	/* Commands we sent out to the IPMB bus. */
	unsigned int sent_ipmb_commands;
	/* Commands sent on the IPMB that had errors on the SEND CMD */
	unsigned int sent_ipmb_command_errs;
	/* Each retransmit increments this count. */
	unsigned int retransmitted_ipmb_commands;
	/* When a message times out (runs out of retransmits) this is
	   incremented. */
	unsigned int timed_out_ipmb_commands;

	/* This is like above, but for broadcasts.  Broadcasts are
	   *not* included in the above count (they are expected to
	   time out). */
	unsigned int timed_out_ipmb_broadcasts;

	/* Responses I have sent to the IPMB bus. */
	unsigned int sent_ipmb_responses;

	/* The response was delivered to the user. */
	unsigned int handled_ipmb_responses;
	/* The response had invalid data in it. */
	unsigned int invalid_ipmb_responses;
	/* The response didn't have anyone waiting for it. */
	unsigned int unhandled_ipmb_responses;

	/* Commands we sent out on the LAN interface. */
	unsigned int sent_lan_commands;
	/* Commands sent on the LAN that had errors on the SEND CMD */
	unsigned int sent_lan_command_errs;
	/* Each retransmit increments this count. */
	unsigned int retransmitted_lan_commands;
	/* When a message times out (runs out of retransmits) this is
	   incremented. */
	unsigned int timed_out_lan_commands;

	/* Responses I have sent on the LAN. */
	unsigned int sent_lan_responses;

	/* The response was delivered to the user. */
	unsigned int handled_lan_responses;
	/* The response had invalid data in it. */
	unsigned int invalid_lan_responses;
	/* The response didn't have anyone waiting for it. */
	unsigned int unhandled_lan_responses;

	/* The command was delivered to the user. */
	unsigned int handled_commands;
	/* The command had invalid data in it. */
	unsigned int invalid_commands;
	/* The command didn't have anyone waiting for it. */
	unsigned int unhandled_commands;

	/* Invalid data in an event. */
	unsigned int invalid_events;
	/* Events that were received with the proper format. */
	unsigned int events;
};

/* NOTE(review): struct ipmi_smi has no 'dev' member (the device field
   is 'si_dev'), so this macro cannot compile if used -- it appears to
   be stale; confirm before relying on it. */
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
/*
 * The driver model view of the IPMI messaging driver.
 */
static struct device_driver ipmidriver = {
	.name = "ipmi",
	.bus = &platform_bus_type
};
static DEFINE_MUTEX(ipmidriver_mutex);

/* All registered interfaces; writers hold ipmi_interfaces_mutex,
   readers traverse under RCU. */
static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/* List of watchers that want to know when smi's are added and
   deleted. */
static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);
380 static void free_recv_msg_list(struct list_head *q)
382 struct ipmi_recv_msg *msg, *msg2;
384 list_for_each_entry_safe(msg, msg2, q, link) {
385 list_del(&msg->link);
386 ipmi_free_recv_msg(msg);
390 static void free_smi_msg_list(struct list_head *q)
392 struct ipmi_smi_msg *msg, *msg2;
394 list_for_each_entry_safe(msg, msg2, q, link) {
395 list_del(&msg->link);
396 ipmi_free_smi_msg(msg);
/*
 * Free everything hanging off a dying interface: queued messages,
 * queued events, every registered command receiver, and any receive
 * messages still parked in the sequence table.  Runs from the final
 * kref release (intf_free), so no new users can appear meanwhile.
 */
static void clean_up_interface_data(ipmi_smi_t intf)
{
	int              i;
	struct cmd_rcvr  *rcvr, *rcvr2;
	struct list_head list;

	free_smi_msg_list(&intf->waiting_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/* Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use. */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_add_rcu(&list, &intf->cmd_rcvrs);
	list_del_rcu(&intf->cmd_rcvrs);
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();

	/* Safe to free now: the grace period above guarantees no
	   reader still holds a reference into the old list. */
	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
		{
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
}
429 static void intf_free(struct kref *ref)
431 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
433 clean_up_interface_data(intf);
434 kfree(intf);
/* Temporary record used to replay existing interfaces to a newly
   registered watcher outside ipmi_interfaces_mutex. */
struct watcher_entry {
	int              intf_num;
	ipmi_smi_t       intf;
	struct list_head link;
};
/*
 * Register a watcher to be told when interfaces are added or removed.
 * Existing interfaces are replayed to watcher->new_smi() after
 * ipmi_interfaces_mutex is dropped (but still under
 * smi_watchers_mutex) so the callback never runs with the interface
 * mutex held.  Returns 0, or -ENOMEM if the replay list cannot be
 * built (in which case the watcher is NOT registered).
 */
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	ipmi_smi_t           intf;
	struct list_head     to_deliver = LIST_HEAD_INIT(to_deliver);
	struct watcher_entry *e, *e2;

	mutex_lock(&smi_watchers_mutex);

	mutex_lock(&ipmi_interfaces_mutex);

	/* Build a list of things to deliver. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == -1)
			continue;	/* Not fully set up yet. */
		e = kmalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out_err;
		/* Hold a ref so the interface survives until delivery. */
		kref_get(&intf->refcount);
		e->intf = intf;
		e->intf_num = intf->intf_num;
		list_add_tail(&e->link, &to_deliver);
	}

	/* We will succeed, so add it to the list. */
	list_add(&watcher->link, &smi_watchers);

	mutex_unlock(&ipmi_interfaces_mutex);

	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		watcher->new_smi(e->intf_num, e->intf->si_dev);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}

	mutex_unlock(&smi_watchers_mutex);

	return 0;

 out_err:
	mutex_unlock(&ipmi_interfaces_mutex);
	mutex_unlock(&smi_watchers_mutex);
	/* Drop the interface refs and memory collected so far. */
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}
	return -ENOMEM;
}
493 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
495 mutex_lock(&smi_watchers_mutex);
496 list_del(&(watcher->link));
497 mutex_unlock(&smi_watchers_mutex);
498 return 0;
502 * Must be called with smi_watchers_mutex held.
504 static void
505 call_smi_watchers(int i, struct device *dev)
507 struct ipmi_smi_watcher *w;
509 list_for_each_entry(w, &smi_watchers, link) {
510 if (try_module_get(w->owner)) {
511 w->new_smi(i, dev);
512 module_put(w->owner);
517 static int
518 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
520 if (addr1->addr_type != addr2->addr_type)
521 return 0;
523 if (addr1->channel != addr2->channel)
524 return 0;
526 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
527 struct ipmi_system_interface_addr *smi_addr1
528 = (struct ipmi_system_interface_addr *) addr1;
529 struct ipmi_system_interface_addr *smi_addr2
530 = (struct ipmi_system_interface_addr *) addr2;
531 return (smi_addr1->lun == smi_addr2->lun);
534 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
535 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
537 struct ipmi_ipmb_addr *ipmb_addr1
538 = (struct ipmi_ipmb_addr *) addr1;
539 struct ipmi_ipmb_addr *ipmb_addr2
540 = (struct ipmi_ipmb_addr *) addr2;
542 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
543 && (ipmb_addr1->lun == ipmb_addr2->lun));
546 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
547 struct ipmi_lan_addr *lan_addr1
548 = (struct ipmi_lan_addr *) addr1;
549 struct ipmi_lan_addr *lan_addr2
550 = (struct ipmi_lan_addr *) addr2;
552 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
553 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
554 && (lan_addr1->session_handle
555 == lan_addr2->session_handle)
556 && (lan_addr1->lun == lan_addr2->lun));
559 return 1;
562 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
564 if (len < sizeof(struct ipmi_system_interface_addr)) {
565 return -EINVAL;
568 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
569 if (addr->channel != IPMI_BMC_CHANNEL)
570 return -EINVAL;
571 return 0;
574 if ((addr->channel == IPMI_BMC_CHANNEL)
575 || (addr->channel >= IPMI_MAX_CHANNELS)
576 || (addr->channel < 0))
577 return -EINVAL;
579 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
580 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
582 if (len < sizeof(struct ipmi_ipmb_addr)) {
583 return -EINVAL;
585 return 0;
588 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
589 if (len < sizeof(struct ipmi_lan_addr)) {
590 return -EINVAL;
592 return 0;
595 return -EINVAL;
598 unsigned int ipmi_addr_length(int addr_type)
600 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
601 return sizeof(struct ipmi_system_interface_addr);
603 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
604 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
606 return sizeof(struct ipmi_ipmb_addr);
609 if (addr_type == IPMI_LAN_ADDR_TYPE)
610 return sizeof(struct ipmi_lan_addr);
612 return 0;
/*
 * Hand a completed receive message to its user.  When the message has
 * no user, user_msg_data holds the interface pointer (see the
 * null_user_handler comment in struct ipmi_smi); the message is given
 * to that handler if present and is always freed here.  When a user
 * exists, its ipmi_recv_hndl callback takes ownership of the message.
 */
static void deliver_response(struct ipmi_recv_msg *msg)
{
	if (!msg->user) {
		ipmi_smi_t    intf = msg->user_msg_data;
		unsigned long flags;

		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
			spin_lock_irqsave(&intf->counter_lock, flags);
			intf->handled_local_responses++;
			spin_unlock_irqrestore(&intf->counter_lock, flags);
		} else {
			/* No handler, so give up. */
			spin_lock_irqsave(&intf->counter_lock, flags);
			intf->unhandled_local_responses++;
			spin_unlock_irqrestore(&intf->counter_lock, flags);
		}
		ipmi_free_recv_msg(msg);
	} else {
		ipmi_user_t user = msg->user;
		user->handler->ipmi_recv_hndl(msg, user->handler_data);
	}
}
640 static void
641 deliver_err_response(struct ipmi_recv_msg *msg, int err)
643 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
644 msg->msg_data[0] = err;
645 msg->msg.netfn |= 1; /* Convert to a response. */
646 msg->msg.data_len = 1;
647 msg->msg.data = msg->msg_data;
648 deliver_response(msg);
/* Find the next sequence number not being used and add the given
   message with the given timeout to the sequence table.  This must be
   called with the interface's seq_lock held.  Returns 0 and fills in
   *seq/*seqid on success, -EAGAIN when every slot is in use. */
static int intf_next_seq(ipmi_smi_t           intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int          rv = 0;
	unsigned int i;

	/* Scan at most one full lap of the table, starting at the
	   slot after the last one handed out. */
	for (i = intf->curr_seq;
	     (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ)
	{
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/* Start with the maximum timeout, when the send response
		   comes in we will start the real timer. */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		/* Bump the per-slot seqid so a stale response for the
		   previous occupant of this slot cannot match. */
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
	} else {
		rv = -EAGAIN;
	}

	return rv;
}
/* Return the receive message for the given sequence number and
   release the sequence number so it can be reused.  Some other data
   is passed in to be sure the message matches up correctly (to help
   guard against messages coming in after their timeout and the
   sequence number being reused).  Returns 0 on a match, -EINVAL for
   an out-of-range seq, -ENODEV when no matching entry is in use. */
static int intf_find_seq(ipmi_smi_t           intf,
			 unsigned char        seq,
			 short                channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int           rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&(intf->seq_lock), flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		/* Only claim the slot when channel, cmd, netfn and the
		   full address all agree with the stored request. */
		if ((msg->addr.channel == channel)
		    && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &(msg->addr))))
		{
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	return rv;
}
733 /* Start the timer for a specific sequence table entry. */
734 static int intf_start_seq_timer(ipmi_smi_t intf,
735 long msgid)
737 int rv = -ENODEV;
738 unsigned long flags;
739 unsigned char seq;
740 unsigned long seqid;
743 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
745 spin_lock_irqsave(&(intf->seq_lock), flags);
746 /* We do this verification because the user can be deleted
747 while a message is outstanding. */
748 if ((intf->seq_table[seq].inuse)
749 && (intf->seq_table[seq].seqid == seqid))
751 struct seq_table *ent = &(intf->seq_table[seq]);
752 ent->timeout = ent->orig_timeout;
753 rv = 0;
755 spin_unlock_irqrestore(&(intf->seq_lock), flags);
757 return rv;
/* Got an error for the send message for a specific sequence number.
   Frees the slot and delivers an error response to the waiting user.
   Returns 0 when the entry matched, -ENODEV otherwise. */
static int intf_err_seq(ipmi_smi_t   intf,
			long         msgid,
			unsigned int err)
{
	int                  rv = -ENODEV;
	unsigned long        flags;
	unsigned char        seq;
	unsigned long        seqid;
	struct ipmi_recv_msg *msg = NULL;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&(intf->seq_lock), flags);
	/* We do this verification because the user can be deleted
	   while a message is outstanding. */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid))
	{
		struct seq_table *ent = &(intf->seq_table[seq]);

		ent->inuse = 0;
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	/* Deliver outside the lock; the user's receive handler runs
	   from deliver_err_response(). */
	if (msg)
		deliver_err_response(msg, err);

	return rv;
}
/*
 * Create a new user of interface @if_num and return it in *user.
 * Returns 0 on success; -EINVAL for a NULL handler or unknown
 * interface; -ENOMEM on allocation failure; -ENODEV when the message
 * handler is not initialized or the lower layer's module is going
 * away.  On success the user holds a reference on the interface and
 * on the lower layer's module until ipmi_destroy_user().
 */
int ipmi_create_user(unsigned int          if_num,
		     struct ipmi_user_hndl *handler,
		     void                  *handler_data,
		     ipmi_user_t           *user)
{
	unsigned long flags;
	ipmi_user_t   new_user;
	int           rv = 0;
	ipmi_smi_t    intf;

	/* There is no module usecount here, because it's not
	   required.  Since this can only be used by and called from
	   other modules, they will implicitly use this module, and
	   thus this can't be removed unless the other modules are
	   removed. */

	if (handler == NULL)
		return -EINVAL;

	/* Make sure the driver is actually initialized, this handles
	   problems with initialization order. */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;

		/* The init code doesn't return an error if it was turned
		   off, but it won't initialize.  Check that. */
		if (!initialized)
			return -ENODEV;
	}

	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
	if (!new_user)
		return -ENOMEM;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = 0;

	if (!try_module_get(intf->handlers->owner)) {
		rv = -ENODEV;
		goto out_kref;
	}

	if (intf->handlers->inc_usecount) {
		rv = intf->handlers->inc_usecount(intf->send_info);
		if (rv) {
			module_put(intf->handlers->owner);
			goto out_kref;
		}
	}

	/* Hold the lock so intf->handlers is guaranteed to be good
	 * until now */
	mutex_unlock(&ipmi_interfaces_mutex);

	new_user->valid = 1;
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	*user = new_user;
	return 0;

 out_kref:
	kref_put(&intf->refcount, intf_free);
 out_kfree:
	mutex_unlock(&ipmi_interfaces_mutex);
	kfree(new_user);
	return rv;
}
882 static void free_user(struct kref *ref)
884 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
885 kfree(user);
/*
 * Detach a user from its interface: mark it invalid, drop any of its
 * messages still in the sequence table, unregister all its command
 * receivers (deferred-freed after an RCU grace period), release the
 * module and interface references taken in ipmi_create_user(), and
 * drop the user's own refcount.  Always returns 0.
 */
int ipmi_destroy_user(ipmi_user_t user)
{
	ipmi_smi_t       intf = user->intf;
	int              i;
	unsigned long    flags;
	struct cmd_rcvr  *rcvr;
	struct cmd_rcvr  *rcvrs = NULL;

	user->valid = 0;

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user))
		{
			intf->seq_table[i].inuse = 0;
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	/* Undo the module/usecount references taken at create time;
	   handlers may already be NULL if the lower layer is gone. */
	mutex_lock(&ipmi_interfaces_mutex);
	if (intf->handlers) {
		module_put(intf->handlers->owner);
		if (intf->handlers->dec_usecount)
			intf->handlers->dec_usecount(intf->send_info);
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	kref_put(&intf->refcount, intf_free);

	kref_put(&user->refcount, free_user);

	return 0;
}
949 void ipmi_get_version(ipmi_user_t user,
950 unsigned char *major,
951 unsigned char *minor)
953 *major = user->intf->ipmi_version_major;
954 *minor = user->intf->ipmi_version_minor;
957 int ipmi_set_my_address(ipmi_user_t user,
958 unsigned int channel,
959 unsigned char address)
961 if (channel >= IPMI_MAX_CHANNELS)
962 return -EINVAL;
963 user->intf->channels[channel].address = address;
964 return 0;
967 int ipmi_get_my_address(ipmi_user_t user,
968 unsigned int channel,
969 unsigned char *address)
971 if (channel >= IPMI_MAX_CHANNELS)
972 return -EINVAL;
973 *address = user->intf->channels[channel].address;
974 return 0;
977 int ipmi_set_my_LUN(ipmi_user_t user,
978 unsigned int channel,
979 unsigned char LUN)
981 if (channel >= IPMI_MAX_CHANNELS)
982 return -EINVAL;
983 user->intf->channels[channel].lun = LUN & 0x3;
984 return 0;
987 int ipmi_get_my_LUN(ipmi_user_t user,
988 unsigned int channel,
989 unsigned char *address)
991 if (channel >= IPMI_MAX_CHANNELS)
992 return -EINVAL;
993 *address = user->intf->channels[channel].lun;
994 return 0;
997 int ipmi_get_maintenance_mode(ipmi_user_t user)
999 int mode;
1000 unsigned long flags;
1002 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1003 mode = user->intf->maintenance_mode;
1004 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1006 return mode;
1008 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1010 static void maintenance_mode_update(ipmi_smi_t intf)
1012 if (intf->handlers->set_maintenance_mode)
1013 intf->handlers->set_maintenance_mode(
1014 intf->send_info, intf->maintenance_mode_enable);
/*
 * Set the maintenance-mode policy (IPMI_MAINTENANCE_MODE_AUTO/OFF/ON)
 * for the user's interface and push the resulting enable state down
 * to the lower layer.  Returns -EINVAL for an unknown mode, else 0.
 */
int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
{
	int           rv = 0;
	unsigned long flags;
	ipmi_smi_t    intf = user->intf;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode = mode;
			/* In AUTO mode the enable tracks whether the
			   auto-maintenance timeout is still running. */
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode = mode;
			intf->maintenance_mode_enable = 0;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode = mode;
			intf->maintenance_mode_enable = 1;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
/*
 * Turn event reception on or off for @user.  When enabling, any
 * events queued on the interface are drained and delivered to this
 * user; delivering_events guards against two threads draining the
 * queue at once, and the events_lock is dropped around the actual
 * deliver_response() calls since they run user callbacks.
 * Always returns 0.
 */
int ipmi_set_gets_events(ipmi_user_t user, int val)
{
	unsigned long        flags;
	ipmi_smi_t           intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head     msgs;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	user->gets_events = val;

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			/* Each delivered message carries a reference
			   on the user. */
			msg->user = user;
			kref_get(&user->refcount);
			deliver_response(msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);

	return 0;
}
1100 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
1101 unsigned char netfn,
1102 unsigned char cmd,
1103 unsigned char chan)
1105 struct cmd_rcvr *rcvr;
1107 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1108 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1109 && (rcvr->chans & (1 << chan)))
1110 return rcvr;
1112 return NULL;
1115 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
1116 unsigned char netfn,
1117 unsigned char cmd,
1118 unsigned int chans)
1120 struct cmd_rcvr *rcvr;
1122 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1123 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1124 && (rcvr->chans & chans))
1125 return 0;
1127 return 1;
/*
 * Register @user to receive incoming commands matching (netfn, cmd)
 * on the channels in the @chans bitmask.  Returns -EBUSY if another
 * receiver already claims any of those channels for that command,
 * -ENOMEM on allocation failure, 0 on success.
 */
int ipmi_register_for_cmd(ipmi_user_t   user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int  chans)
{
	ipmi_smi_t      intf = user->intf;
	struct cmd_rcvr *rcvr;
	int             rv = 0;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr)
		return -ENOMEM;
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

 out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);

	return rv;
}
/*
 * Remove @user's registration for (netfn, cmd) on the channels in
 * @chans.  Receivers whose channel mask becomes empty are unlinked
 * and freed after an RCU grace period.  Returns 0 if at least one
 * matching registration was found, -ENOENT otherwise.
 */
int ipmi_unregister_for_cmd(ipmi_user_t   user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int  chans)
{
	ipmi_smi_t      intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				/* Chain for deferred freeing: the RCU
				   link can't be reused until after
				   synchronize_rcu() below. */
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}
	return rv;
}
/*
 * Pass a run-to-completion mode change through to the low-level
 * interface driver, if one is still registered.
 */
1202 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1204 ipmi_smi_t intf = user->intf;
1205 if (intf->handlers)
1206 intf->handlers->set_run_to_completion(intf->send_info, val);
/*
 * Compute the IPMB two's-complement checksum of a byte buffer: the
 * value that, when added to the modulo-256 sum of the data, yields
 * zero.  A zero-length buffer checksums to 0.
 */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < size; i++)
		sum += data[i];

	/* Two's complement so (sum of data + checksum) % 256 == 0. */
	return -sum;
}
/*
 * Build a Send Message command that wraps an IPMB-formatted message in
 * smi_msg->data.  For broadcasts a zero byte is inserted at data[3]
 * and "i" (0 or 1) shifts every subsequent field by one.  Two
 * checksums are written: the header checksum at data[i+5] covering the
 * slave address and netfn/LUN byte, and the full message checksum
 * appended after the data bytes.
 */
1220 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1221 struct kernel_ipmi_msg *msg,
1222 struct ipmi_ipmb_addr *ipmb_addr,
1223 long msgid,
1224 unsigned char ipmb_seq,
1225 int broadcast,
1226 unsigned char source_address,
1227 unsigned char source_lun)
/* Offset applied to all fields after the broadcast byte (0 or 1). */
1229 int i = broadcast;
1231 /* Format the IPMB header data. */
1232 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1233 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1234 smi_msg->data[2] = ipmb_addr->channel;
1235 if (broadcast)
1236 smi_msg->data[3] = 0;
1237 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1238 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
/* Header checksum over slave address and netfn/LUN (2 bytes). */
1239 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1240 smi_msg->data[i+6] = source_address;
1241 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1242 smi_msg->data[i+8] = msg->cmd;
1244 /* Now tack on the data to the message. */
1245 if (msg->data_len > 0)
1246 memcpy(&(smi_msg->data[i+9]), msg->data,
1247 msg->data_len);
1248 smi_msg->data_size = msg->data_len + 9;
1250 /* Now calculate the checksum and tack it on. */
1251 smi_msg->data[i+smi_msg->data_size]
1252 = ipmb_checksum(&(smi_msg->data[i+6]),
1253 smi_msg->data_size-6);
1255 /* Add on the checksum size and the offset from the
1256 broadcast. */
1257 smi_msg->data_size += 1 + i;
1259 smi_msg->msgid = msgid;
/*
 * Build a Send Message command that wraps a LAN-formatted message in
 * smi_msg->data.  Like format_ipmb_msg() but with a session handle
 * and software IDs in place of IPMB slave addresses, and no broadcast
 * offset.  Two checksums are written: the header checksum at data[6]
 * and the full message checksum appended after the data bytes.
 */
1262 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1263 struct kernel_ipmi_msg *msg,
1264 struct ipmi_lan_addr *lan_addr,
1265 long msgid,
1266 unsigned char ipmb_seq,
1267 unsigned char source_lun)
1269 /* Format the IPMB header data. */
1270 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1271 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1272 smi_msg->data[2] = lan_addr->channel;
1273 smi_msg->data[3] = lan_addr->session_handle;
1274 smi_msg->data[4] = lan_addr->remote_SWID;
1275 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
/* Header checksum over remote SWID and netfn/LUN (2 bytes). */
1276 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1277 smi_msg->data[7] = lan_addr->local_SWID;
1278 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1279 smi_msg->data[9] = msg->cmd;
1281 /* Now tack on the data to the message. */
1282 if (msg->data_len > 0)
1283 memcpy(&(smi_msg->data[10]), msg->data,
1284 msg->data_len);
1285 smi_msg->data_size = msg->data_len + 10;
1287 /* Now calculate the checksum and tack it on. */
1288 smi_msg->data[smi_msg->data_size]
1289 = ipmb_checksum(&(smi_msg->data[7]),
1290 smi_msg->data_size-7);
1292 /* Add on the checksum size and the offset from the
1293 broadcast. */
1294 smi_msg->data_size += 1;
1296 smi_msg->msgid = msgid;
1299 /* Separate from ipmi_request so that the user does not have to be
1300 supplied in certain circumstances (mainly at panic time). If
1301 messages are supplied, they will be freed, even if an error
1302 occurs. */
/*
 * Core message-submission path.  Allocates (or adopts supplied) SMI and
 * receive messages, validates the destination address, formats the
 * outgoing message for the address type (system interface, IPMB, or
 * LAN), and hands it to the low-level driver's sender.  Returns 0 on
 * success or a negative errno; on failure both messages are freed.
 */
1303 static int i_ipmi_request(ipmi_user_t user,
1304 ipmi_smi_t intf,
1305 struct ipmi_addr *addr,
1306 long msgid,
1307 struct kernel_ipmi_msg *msg,
1308 void *user_msg_data,
1309 void *supplied_smi,
1310 struct ipmi_recv_msg *supplied_recv,
1311 int priority,
1312 unsigned char source_address,
1313 unsigned char source_lun,
1314 int retries,
1315 unsigned int retry_time_ms)
1317 int rv = 0;
1318 struct ipmi_smi_msg *smi_msg;
1319 struct ipmi_recv_msg *recv_msg;
1320 unsigned long flags;
1321 struct ipmi_smi_handlers *handlers;
/* Use the caller-supplied messages if given; otherwise allocate. */
1324 if (supplied_recv) {
1325 recv_msg = supplied_recv;
1326 } else {
1327 recv_msg = ipmi_alloc_recv_msg();
1328 if (recv_msg == NULL) {
1329 return -ENOMEM;
1332 recv_msg->user_msg_data = user_msg_data;
1334 if (supplied_smi) {
1335 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1336 } else {
1337 smi_msg = ipmi_alloc_smi_msg();
1338 if (smi_msg == NULL) {
1339 ipmi_free_recv_msg(recv_msg);
1340 return -ENOMEM;
/* RCU protects intf->handlers against concurrent interface removal. */
1344 rcu_read_lock();
1345 handlers = intf->handlers;
1346 if (!handlers) {
1347 rv = -ENODEV;
1348 goto out_err;
1351 recv_msg->user = user;
1352 if (user)
1353 kref_get(&user->refcount);
1354 recv_msg->msgid = msgid;
1355 /* Store the message to send in the receive message so timeout
1356 responses can get the proper response data. */
1357 recv_msg->msg = *msg;
/* Dispatch on the destination address type. */
1359 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1360 struct ipmi_system_interface_addr *smi_addr;
/* Odd netfn means a response message. */
1362 if (msg->netfn & 1) {
1363 /* Responses are not allowed to the SMI. */
1364 rv = -EINVAL;
1365 goto out_err;
1368 smi_addr = (struct ipmi_system_interface_addr *) addr;
1369 if (smi_addr->lun > 3) {
1370 spin_lock_irqsave(&intf->counter_lock, flags);
1371 intf->sent_invalid_commands++;
1372 spin_unlock_irqrestore(&intf->counter_lock, flags);
1373 rv = -EINVAL;
1374 goto out_err;
1377 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1379 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1380 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1381 || (msg->cmd == IPMI_GET_MSG_CMD)
1382 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1384 /* We don't let the user do these, since we manage
1385 the sequence numbers. */
1386 spin_lock_irqsave(&intf->counter_lock, flags);
1387 intf->sent_invalid_commands++;
1388 spin_unlock_irqrestore(&intf->counter_lock, flags);
1389 rv = -EINVAL;
1390 goto out_err;
/* Reset/firmware commands put the interface into maintenance mode. */
1393 if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1394 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1395 || (msg->cmd == IPMI_WARM_RESET_CMD)))
1396 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
1398 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1399 intf->auto_maintenance_timeout
1400 = IPMI_MAINTENANCE_MODE_TIMEOUT;
1401 if (!intf->maintenance_mode
1402 && !intf->maintenance_mode_enable)
1404 intf->maintenance_mode_enable = 1;
1405 maintenance_mode_update(intf);
1407 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1408 flags);
/* 2 bytes of header (netfn/LUN + cmd) precede the data. */
1411 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1412 spin_lock_irqsave(&intf->counter_lock, flags);
1413 intf->sent_invalid_commands++;
1414 spin_unlock_irqrestore(&intf->counter_lock, flags);
1415 rv = -EMSGSIZE;
1416 goto out_err;
1419 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1420 smi_msg->data[1] = msg->cmd;
1421 smi_msg->msgid = msgid;
1422 smi_msg->user_data = recv_msg;
1423 if (msg->data_len > 0)
1424 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1425 smi_msg->data_size = msg->data_len + 2;
1426 spin_lock_irqsave(&intf->counter_lock, flags);
1427 intf->sent_local_commands++;
1428 spin_unlock_irqrestore(&intf->counter_lock, flags);
1429 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1430 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1432 struct ipmi_ipmb_addr *ipmb_addr;
1433 unsigned char ipmb_seq;
1434 long seqid;
1435 int broadcast = 0;
1437 if (addr->channel >= IPMI_MAX_CHANNELS) {
1438 spin_lock_irqsave(&intf->counter_lock, flags);
1439 intf->sent_invalid_commands++;
1440 spin_unlock_irqrestore(&intf->counter_lock, flags);
1441 rv = -EINVAL;
1442 goto out_err;
/* Channel must actually be an IPMB channel. */
1445 if (intf->channels[addr->channel].medium
1446 != IPMI_CHANNEL_MEDIUM_IPMB)
1448 spin_lock_irqsave(&intf->counter_lock, flags);
1449 intf->sent_invalid_commands++;
1450 spin_unlock_irqrestore(&intf->counter_lock, flags);
1451 rv = -EINVAL;
1452 goto out_err;
/* Negative retries means "use the default". */
1455 if (retries < 0) {
1456 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1457 retries = 0; /* Don't retry broadcasts. */
1458 else
1459 retries = 4;
1461 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1462 /* Broadcasts add a zero at the beginning of the
1463 message, but otherwise is the same as an IPMB
1464 address. */
1465 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1466 broadcast = 1;
1470 /* Default to 1 second retries. */
1471 if (retry_time_ms == 0)
1472 retry_time_ms = 1000;
1474 /* 9 for the header and 1 for the checksum, plus
1475 possibly one for the broadcast. */
1476 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1477 spin_lock_irqsave(&intf->counter_lock, flags);
1478 intf->sent_invalid_commands++;
1479 spin_unlock_irqrestore(&intf->counter_lock, flags);
1480 rv = -EMSGSIZE;
1481 goto out_err;
1484 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1485 if (ipmb_addr->lun > 3) {
1486 spin_lock_irqsave(&intf->counter_lock, flags);
1487 intf->sent_invalid_commands++;
1488 spin_unlock_irqrestore(&intf->counter_lock, flags);
1489 rv = -EINVAL;
1490 goto out_err;
1493 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1495 if (recv_msg->msg.netfn & 0x1) {
1496 /* It's a response, so use the user's sequence
1497 from msgid. */
1498 spin_lock_irqsave(&intf->counter_lock, flags);
1499 intf->sent_ipmb_responses++;
1500 spin_unlock_irqrestore(&intf->counter_lock, flags);
1501 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1502 msgid, broadcast,
1503 source_address, source_lun);
1505 /* Save the receive message so we can use it
1506 to deliver the response. */
1507 smi_msg->user_data = recv_msg;
1508 } else {
1509 /* It's a command, so get a sequence for it. */
1511 spin_lock_irqsave(&(intf->seq_lock), flags);
/* counter_lock nests inside seq_lock here. */
1513 spin_lock(&intf->counter_lock);
1514 intf->sent_ipmb_commands++;
1515 spin_unlock(&intf->counter_lock);
1517 /* Create a sequence number with a 1 second
1518 timeout and 4 retries. */
1519 rv = intf_next_seq(intf,
1520 recv_msg,
1521 retry_time_ms,
1522 retries,
1523 broadcast,
1524 &ipmb_seq,
1525 &seqid);
1526 if (rv) {
1527 /* We have used up all the sequence numbers,
1528 probably, so abort. */
1529 spin_unlock_irqrestore(&(intf->seq_lock),
1530 flags);
1531 goto out_err;
1534 /* Store the sequence number in the message,
1535 so that when the send message response
1536 comes back we can start the timer. */
1537 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1538 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1539 ipmb_seq, broadcast,
1540 source_address, source_lun);
1542 /* Copy the message into the recv message data, so we
1543 can retransmit it later if necessary. */
1544 memcpy(recv_msg->msg_data, smi_msg->data,
1545 smi_msg->data_size);
1546 recv_msg->msg.data = recv_msg->msg_data;
1547 recv_msg->msg.data_len = smi_msg->data_size;
1549 /* We don't unlock until here, because we need
1550 to copy the completed message into the
1551 recv_msg before we release the lock.
1552 Otherwise, race conditions may bite us. I
1553 know that's pretty paranoid, but I prefer
1554 to be correct. */
1555 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1557 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1558 struct ipmi_lan_addr *lan_addr;
1559 unsigned char ipmb_seq;
1560 long seqid;
1562 if (addr->channel >= IPMI_MAX_CHANNELS) {
1563 spin_lock_irqsave(&intf->counter_lock, flags);
1564 intf->sent_invalid_commands++;
1565 spin_unlock_irqrestore(&intf->counter_lock, flags);
1566 rv = -EINVAL;
1567 goto out_err;
/* LAN routing is allowed only on 802.3 LAN or async channels. */
1570 if ((intf->channels[addr->channel].medium
1571 != IPMI_CHANNEL_MEDIUM_8023LAN)
1572 && (intf->channels[addr->channel].medium
1573 != IPMI_CHANNEL_MEDIUM_ASYNC))
1575 spin_lock_irqsave(&intf->counter_lock, flags);
1576 intf->sent_invalid_commands++;
1577 spin_unlock_irqrestore(&intf->counter_lock, flags);
1578 rv = -EINVAL;
1579 goto out_err;
/* NOTE(review): upstream guards this with "if (retries < 0)";
   that guard line appears to be missing from this copy — verify
   against the original tree before relying on this text. */
1582 retries = 4;
1584 /* Default to 1 second retries. */
1585 if (retry_time_ms == 0)
1586 retry_time_ms = 1000;
1588 /* 11 for the header and 1 for the checksum. */
1589 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1590 spin_lock_irqsave(&intf->counter_lock, flags);
1591 intf->sent_invalid_commands++;
1592 spin_unlock_irqrestore(&intf->counter_lock, flags);
1593 rv = -EMSGSIZE;
1594 goto out_err;
1597 lan_addr = (struct ipmi_lan_addr *) addr;
1598 if (lan_addr->lun > 3) {
1599 spin_lock_irqsave(&intf->counter_lock, flags);
1600 intf->sent_invalid_commands++;
1601 spin_unlock_irqrestore(&intf->counter_lock, flags);
1602 rv = -EINVAL;
1603 goto out_err;
1606 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1608 if (recv_msg->msg.netfn & 0x1) {
1609 /* It's a response, so use the user's sequence
1610 from msgid. */
1611 spin_lock_irqsave(&intf->counter_lock, flags);
1612 intf->sent_lan_responses++;
1613 spin_unlock_irqrestore(&intf->counter_lock, flags);
1614 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1615 msgid, source_lun);
1617 /* Save the receive message so we can use it
1618 to deliver the response. */
1619 smi_msg->user_data = recv_msg;
1620 } else {
1621 /* It's a command, so get a sequence for it. */
1623 spin_lock_irqsave(&(intf->seq_lock), flags);
1625 spin_lock(&intf->counter_lock);
1626 intf->sent_lan_commands++;
1627 spin_unlock(&intf->counter_lock);
1629 /* Create a sequence number with a 1 second
1630 timeout and 4 retries. */
1631 rv = intf_next_seq(intf,
1632 recv_msg,
1633 retry_time_ms,
1634 retries,
/* NOTE(review): the broadcast argument (a literal 0) appears to be
   missing from this copy of the call — confirm against the original. */
1636 &ipmb_seq,
1637 &seqid);
1638 if (rv) {
1639 /* We have used up all the sequence numbers,
1640 probably, so abort. */
1641 spin_unlock_irqrestore(&(intf->seq_lock),
1642 flags);
1643 goto out_err;
1646 /* Store the sequence number in the message,
1647 so that when the send message response
1648 comes back we can start the timer. */
1649 format_lan_msg(smi_msg, msg, lan_addr,
1650 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1651 ipmb_seq, source_lun);
1653 /* Copy the message into the recv message data, so we
1654 can retransmit it later if necessary. */
1655 memcpy(recv_msg->msg_data, smi_msg->data,
1656 smi_msg->data_size);
1657 recv_msg->msg.data = recv_msg->msg_data;
1658 recv_msg->msg.data_len = smi_msg->data_size;
1660 /* We don't unlock until here, because we need
1661 to copy the completed message into the
1662 recv_msg before we release the lock.
1663 Otherwise, race conditions may bite us. I
1664 know that's pretty paranoid, but I prefer
1665 to be correct. */
1666 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1668 } else {
1669 /* Unknown address type. */
1670 spin_lock_irqsave(&intf->counter_lock, flags);
1671 intf->sent_invalid_commands++;
1672 spin_unlock_irqrestore(&intf->counter_lock, flags);
1673 rv = -EINVAL;
1674 goto out_err;
1677 #ifdef DEBUG_MSGING
1679 int m;
1680 for (m = 0; m < smi_msg->data_size; m++)
1681 printk(" %2.2x", smi_msg->data[m]);
1682 printk("\n");
1684 #endif
/* Hand the formatted message to the low-level driver. */
1686 handlers->sender(intf->send_info, smi_msg, priority);
1687 rcu_read_unlock();
1689 return 0;
1691 out_err:
/* On any failure both messages are freed, as documented above. */
1692 rcu_read_unlock();
1693 ipmi_free_smi_msg(smi_msg);
1694 ipmi_free_recv_msg(recv_msg);
1695 return rv;
/*
 * Validate the channel of an address and look up the interface's
 * source slave address and LUN for that channel.  Returns 0 on
 * success, -EINVAL if the channel index is out of range.
 */
1698 static int check_addr(ipmi_smi_t intf,
1699 struct ipmi_addr *addr,
1700 unsigned char *saddr,
1701 unsigned char *lun)
1703 if (addr->channel >= IPMI_MAX_CHANNELS)
1704 return -EINVAL;
1705 *lun = intf->channels[addr->channel].lun;
1706 *saddr = intf->channels[addr->channel].address;
1707 return 0;
/*
 * Public request entry point that lets the caller specify retry count
 * and retry interval.  Validates the address, resolves the source
 * address/LUN for the channel, and forwards to i_ipmi_request() with
 * no supplied messages.
 */
1710 int ipmi_request_settime(ipmi_user_t user,
1711 struct ipmi_addr *addr,
1712 long msgid,
1713 struct kernel_ipmi_msg *msg,
1714 void *user_msg_data,
1715 int priority,
1716 int retries,
1717 unsigned int retry_time_ms)
1719 unsigned char saddr, lun;
1720 int rv;
1722 if (!user)
1723 return -EINVAL;
1724 rv = check_addr(user->intf, addr, &saddr, &lun);
1725 if (rv)
1726 return rv;
1727 return i_ipmi_request(user,
1728 user->intf,
1729 addr,
1730 msgid,
1731 msg,
1732 user_msg_data,
1733 NULL, NULL,
1734 priority,
1735 saddr,
1736 lun,
1737 retries,
1738 retry_time_ms);
/*
 * Public request entry point where the caller supplies preallocated
 * SMI and receive messages (useful when allocation is not possible,
 * e.g. at panic time).  Uses default retries (-1) and default retry
 * time (0, i.e. 1 second) — see i_ipmi_request().
 */
1741 int ipmi_request_supply_msgs(ipmi_user_t user,
1742 struct ipmi_addr *addr,
1743 long msgid,
1744 struct kernel_ipmi_msg *msg,
1745 void *user_msg_data,
1746 void *supplied_smi,
1747 struct ipmi_recv_msg *supplied_recv,
1748 int priority)
1750 unsigned char saddr, lun;
1751 int rv;
1753 if (!user)
1754 return -EINVAL;
1755 rv = check_addr(user->intf, addr, &saddr, &lun);
1756 if (rv)
1757 return rv;
1758 return i_ipmi_request(user,
1759 user->intf,
1760 addr,
1761 msgid,
1762 msg,
1763 user_msg_data,
1764 supplied_smi,
1765 supplied_recv,
1766 priority,
1767 saddr,
1768 lun,
1769 -1, 0);
1772 #ifdef CONFIG_PROC_FS
/*
 * /proc read handler: print each channel's IPMB slave address as a
 * space-separated hex list terminated by a newline.  Returns the
 * number of bytes written (legacy read_proc interface).
 */
1773 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1774 int count, int *eof, void *data)
1776 char *out = (char *) page;
1777 ipmi_smi_t intf = data;
1778 int i;
1779 int rv = 0;
1781 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1782 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1783 out[rv-1] = '\n'; /* Replace the final space with a newline */
1784 out[rv] = '\0';
1785 rv++;
1786 return rv;
/*
 * /proc read handler: print the BMC's IPMI version as "major.minor".
 */
1789 static int version_file_read_proc(char *page, char **start, off_t off,
1790 int count, int *eof, void *data)
1792 char *out = (char *) page;
1793 ipmi_smi_t intf = data;
1795 return sprintf(out, "%d.%d\n",
1796 ipmi_version_major(&intf->bmc->id),
1797 ipmi_version_minor(&intf->bmc->id));
/*
 * /proc read handler: dump all per-interface message statistics
 * counters, one "name: value" per line.  Returns bytes written.
 */
1800 static int stat_file_read_proc(char *page, char **start, off_t off,
1801 int count, int *eof, void *data)
1803 char *out = (char *) page;
1804 ipmi_smi_t intf = data;
1806 out += sprintf(out, "sent_invalid_commands: %d\n",
1807 intf->sent_invalid_commands);
1808 out += sprintf(out, "sent_local_commands: %d\n",
1809 intf->sent_local_commands);
1810 out += sprintf(out, "handled_local_responses: %d\n",
1811 intf->handled_local_responses);
1812 out += sprintf(out, "unhandled_local_responses: %d\n",
1813 intf->unhandled_local_responses);
1814 out += sprintf(out, "sent_ipmb_commands: %d\n",
1815 intf->sent_ipmb_commands);
1816 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1817 intf->sent_ipmb_command_errs);
1818 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1819 intf->retransmitted_ipmb_commands);
1820 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1821 intf->timed_out_ipmb_commands);
1822 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1823 intf->timed_out_ipmb_broadcasts);
1824 out += sprintf(out, "sent_ipmb_responses: %d\n",
1825 intf->sent_ipmb_responses);
1826 out += sprintf(out, "handled_ipmb_responses: %d\n",
1827 intf->handled_ipmb_responses);
1828 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1829 intf->invalid_ipmb_responses);
1830 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1831 intf->unhandled_ipmb_responses);
1832 out += sprintf(out, "sent_lan_commands: %d\n",
1833 intf->sent_lan_commands);
1834 out += sprintf(out, "sent_lan_command_errs: %d\n",
1835 intf->sent_lan_command_errs);
1836 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1837 intf->retransmitted_lan_commands);
1838 out += sprintf(out, "timed_out_lan_commands: %d\n",
1839 intf->timed_out_lan_commands);
1840 out += sprintf(out, "sent_lan_responses: %d\n",
1841 intf->sent_lan_responses);
1842 out += sprintf(out, "handled_lan_responses: %d\n",
1843 intf->handled_lan_responses);
1844 out += sprintf(out, "invalid_lan_responses: %d\n",
1845 intf->invalid_lan_responses);
1846 out += sprintf(out, "unhandled_lan_responses: %d\n",
1847 intf->unhandled_lan_responses);
1848 out += sprintf(out, "handled_commands: %d\n",
1849 intf->handled_commands);
1850 out += sprintf(out, "invalid_commands: %d\n",
1851 intf->invalid_commands);
1852 out += sprintf(out, "unhandled_commands: %d\n",
1853 intf->unhandled_commands);
1854 out += sprintf(out, "invalid_events: %d\n",
1855 intf->invalid_events);
1856 out += sprintf(out, "events: %d\n",
1857 intf->events);
1859 return (out - ((char *) page));
1861 #endif /* CONFIG_PROC_FS */
/*
 * Create a proc file under the interface's proc directory and track it
 * on smi->proc_entries so remove_proc_entries() can tear it down.
 * Returns 0 on success or -ENOMEM.  A no-op returning 0 when procfs
 * is not configured.
 */
1863 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1864 read_proc_t *read_proc, write_proc_t *write_proc,
1865 void *data, struct module *owner)
1867 int rv = 0;
1868 #ifdef CONFIG_PROC_FS
1869 struct proc_dir_entry *file;
1870 struct ipmi_proc_entry *entry;
1872 /* Create a list element. */
1873 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1874 if (!entry)
1875 return -ENOMEM;
/* Keep a private copy of the name; it is needed again at removal. */
1876 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1877 if (!entry->name) {
1878 kfree(entry);
1879 return -ENOMEM;
1881 strcpy(entry->name, name);
1883 file = create_proc_entry(name, 0, smi->proc_dir);
1884 if (!file) {
1885 kfree(entry->name);
1886 kfree(entry);
1887 rv = -ENOMEM;
1888 } else {
1889 file->nlink = 1;
1890 file->data = data;
1891 file->read_proc = read_proc;
1892 file->write_proc = write_proc;
1893 file->owner = owner;
1895 spin_lock(&smi->proc_entry_lock);
1896 /* Stick it on the list. */
1897 entry->next = smi->proc_entries;
1898 smi->proc_entries = entry;
1899 spin_unlock(&smi->proc_entry_lock);
1901 #endif /* CONFIG_PROC_FS */
1903 return rv;
/*
 * Create the per-interface proc directory (named after the interface
 * number) and populate it with the "stats", "ipmb" and "version"
 * files.  Returns 0 on success or -ENOMEM.
 */
1906 static int add_proc_entries(ipmi_smi_t smi, int num)
1908 int rv = 0;
1910 #ifdef CONFIG_PROC_FS
1911 sprintf(smi->proc_dir_name, "%d", num);
1912 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1913 if (!smi->proc_dir)
1914 rv = -ENOMEM;
1915 else {
1916 smi->proc_dir->owner = THIS_MODULE;
/* Stop at the first failure; rv carries the error forward. */
1919 if (rv == 0)
1920 rv = ipmi_smi_add_proc_entry(smi, "stats",
1921 stat_file_read_proc, NULL,
1922 smi, THIS_MODULE);
1924 if (rv == 0)
1925 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1926 ipmb_file_read_proc, NULL,
1927 smi, THIS_MODULE);
1929 if (rv == 0)
1930 rv = ipmi_smi_add_proc_entry(smi, "version",
1931 version_file_read_proc, NULL,
1932 smi, THIS_MODULE);
1933 #endif /* CONFIG_PROC_FS */
1935 return rv;
/*
 * Remove every proc file registered via ipmi_smi_add_proc_entry(),
 * free the tracking entries, then remove the interface's proc
 * directory itself.
 */
1938 static void remove_proc_entries(ipmi_smi_t smi)
1940 #ifdef CONFIG_PROC_FS
1941 struct ipmi_proc_entry *entry;
1943 spin_lock(&smi->proc_entry_lock);
1944 while (smi->proc_entries) {
1945 entry = smi->proc_entries;
1946 smi->proc_entries = entry->next;
1948 remove_proc_entry(entry->name, smi->proc_dir);
1949 kfree(entry->name);
1950 kfree(entry);
1952 spin_unlock(&smi->proc_entry_lock);
1953 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1954 #endif /* CONFIG_PROC_FS */
/*
 * driver_find_device() match callback: true if the device's BMC has
 * the 16-byte GUID passed in "data".
 */
1957 static int __find_bmc_guid(struct device *dev, void *data)
1959 unsigned char *id = data;
1960 struct bmc_device *bmc = dev_get_drvdata(dev);
1961 return memcmp(bmc->guid, id, 16) == 0;
/*
 * Find an already-registered BMC device by GUID.  Returns the
 * bmc_device or NULL if none matches.
 */
1964 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1965 unsigned char *guid)
1967 struct device *dev;
1969 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1970 if (dev)
1971 return dev_get_drvdata(dev);
1972 else
1973 return NULL;
/* Key used to match a BMC by product id + device id pair. */
1976 struct prod_dev_id {
1977 unsigned int product_id;
1978 unsigned char device_id;
/*
 * driver_find_device() match callback: true if the device's BMC has
 * the product id and device id given in the prod_dev_id key.
 */
1981 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1983 struct prod_dev_id *id = data;
1984 struct bmc_device *bmc = dev_get_drvdata(dev);
1986 return (bmc->id.product_id == id->product_id
1987 && bmc->id.device_id == id->device_id);
/*
 * Find an already-registered BMC device by product id + device id.
 * Returns the bmc_device or NULL if none matches.
 */
1990 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1991 struct device_driver *drv,
1992 unsigned int product_id, unsigned char device_id)
1994 struct prod_dev_id id = {
1995 .product_id = product_id,
1996 .device_id = device_id,
1998 struct device *dev;
2000 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2001 if (dev)
2002 return dev_get_drvdata(dev);
2003 else
2004 return NULL;
/* sysfs: show the BMC device id as a decimal number. */
2007 static ssize_t device_id_show(struct device *dev,
2008 struct device_attribute *attr,
2009 char *buf)
2011 struct bmc_device *bmc = dev_get_drvdata(dev);
2013 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
/* sysfs: show bit 7 of the device revision ("provides device SDRs"). */
2016 static ssize_t provides_dev_sdrs_show(struct device *dev,
2017 struct device_attribute *attr,
2018 char *buf)
2020 struct bmc_device *bmc = dev_get_drvdata(dev);
2022 return snprintf(buf, 10, "%u\n",
2023 (bmc->id.device_revision & 0x80) >> 7);
/* sysfs: show the low nibble of the device revision. */
2026 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2027 char *buf)
2029 struct bmc_device *bmc = dev_get_drvdata(dev);
2031 return snprintf(buf, 20, "%u\n",
2032 bmc->id.device_revision & 0x0F);
/* sysfs: show the firmware revision as "major.minor" (minor in hex). */
2035 static ssize_t firmware_rev_show(struct device *dev,
2036 struct device_attribute *attr,
2037 char *buf)
2039 struct bmc_device *bmc = dev_get_drvdata(dev);
2041 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2042 bmc->id.firmware_revision_2);
/* sysfs: show the supported IPMI specification version as "major.minor". */
2045 static ssize_t ipmi_version_show(struct device *dev,
2046 struct device_attribute *attr,
2047 char *buf)
2049 struct bmc_device *bmc = dev_get_drvdata(dev);
2051 return snprintf(buf, 20, "%u.%u\n",
2052 ipmi_version_major(&bmc->id),
2053 ipmi_version_minor(&bmc->id));
/* sysfs: show the additional-device-support byte as hex. */
2056 static ssize_t add_dev_support_show(struct device *dev,
2057 struct device_attribute *attr,
2058 char *buf)
2060 struct bmc_device *bmc = dev_get_drvdata(dev);
2062 return snprintf(buf, 10, "0x%02x\n",
2063 bmc->id.additional_device_support);
/* sysfs: show the manufacturer id as a 6-digit hex number. */
2066 static ssize_t manufacturer_id_show(struct device *dev,
2067 struct device_attribute *attr,
2068 char *buf)
2070 struct bmc_device *bmc = dev_get_drvdata(dev);
2072 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
/* sysfs: show the product id as a 4-digit hex number. */
2075 static ssize_t product_id_show(struct device *dev,
2076 struct device_attribute *attr,
2077 char *buf)
2079 struct bmc_device *bmc = dev_get_drvdata(dev);
2081 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
/* sysfs: show the 4 auxiliary firmware revision bytes, most significant
   index first. */
2084 static ssize_t aux_firmware_rev_show(struct device *dev,
2085 struct device_attribute *attr,
2086 char *buf)
2088 struct bmc_device *bmc = dev_get_drvdata(dev);
2090 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2091 bmc->id.aux_firmware_revision[3],
2092 bmc->id.aux_firmware_revision[2],
2093 bmc->id.aux_firmware_revision[1],
2094 bmc->id.aux_firmware_revision[0]);
/* sysfs: show the BMC GUID.  NOTE(review): this prints only guid[0] and
   guid[8] as two values, not all 16 bytes — looks questionable; confirm
   intent against the GUID layout before relying on the output format. */
2097 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2098 char *buf)
2100 struct bmc_device *bmc = dev_get_drvdata(dev);
2102 return snprintf(buf, 100, "%Lx%Lx\n",
2103 (long long) bmc->guid[0],
2104 (long long) bmc->guid[8]);
/*
 * Remove all the sysfs attribute files created by create_files().
 * Safe to call when the platform device was never created.  The
 * aux-firmware and guid files are conditional, mirroring creation.
 */
2107 static void remove_files(struct bmc_device *bmc)
2109 if (!bmc->dev)
2110 return;
2112 device_remove_file(&bmc->dev->dev,
2113 &bmc->device_id_attr);
2114 device_remove_file(&bmc->dev->dev,
2115 &bmc->provides_dev_sdrs_attr);
2116 device_remove_file(&bmc->dev->dev,
2117 &bmc->revision_attr);
2118 device_remove_file(&bmc->dev->dev,
2119 &bmc->firmware_rev_attr);
2120 device_remove_file(&bmc->dev->dev,
2121 &bmc->version_attr);
2122 device_remove_file(&bmc->dev->dev,
2123 &bmc->add_dev_support_attr);
2124 device_remove_file(&bmc->dev->dev,
2125 &bmc->manufacturer_id_attr);
2126 device_remove_file(&bmc->dev->dev,
2127 &bmc->product_id_attr);
2129 if (bmc->id.aux_firmware_revision_set)
2130 device_remove_file(&bmc->dev->dev,
2131 &bmc->aux_firmware_rev_attr);
2132 if (bmc->guid_set)
2133 device_remove_file(&bmc->dev->dev,
2134 &bmc->guid_attr);
/*
 * kref release callback: tear down the BMC's sysfs files, unregister
 * its platform device, and free the bmc_device.  Invoked when the
 * last reference is dropped via kref_put().
 */
2137 static void
2138 cleanup_bmc_device(struct kref *ref)
2140 struct bmc_device *bmc;
2142 bmc = container_of(ref, struct bmc_device, refcount);
2144 remove_files(bmc);
2145 platform_device_unregister(bmc->dev);
2146 kfree(bmc);
/*
 * Detach an interface from its BMC: remove the sysfs cross-links
 * between the interface device and the BMC device, then drop the
 * interface's reference on the (possibly shared) bmc_device under
 * the driver mutex.
 */
2149 static void ipmi_bmc_unregister(ipmi_smi_t intf)
2151 struct bmc_device *bmc = intf->bmc;
2153 if (intf->sysfs_name) {
2154 sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2155 kfree(intf->sysfs_name);
2156 intf->sysfs_name = NULL;
2158 if (intf->my_dev_name) {
2159 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2160 kfree(intf->my_dev_name);
2161 intf->my_dev_name = NULL;
/* May free the bmc_device if this was the last interface using it. */
2164 mutex_lock(&ipmidriver_mutex);
2165 kref_put(&bmc->refcount, cleanup_bmc_device);
2166 intf->bmc = NULL;
2167 mutex_unlock(&ipmidriver_mutex);
/*
 * Initialize and create all the BMC sysfs attribute files.  The
 * aux-firmware and guid files are created only when the corresponding
 * data is present.  On any failure the already-created files are
 * removed in reverse order via the goto ladder; returns 0 or the
 * device_create_file() error.
 */
2170 static int create_files(struct bmc_device *bmc)
2172 int err;
2174 bmc->device_id_attr.attr.name = "device_id";
2175 bmc->device_id_attr.attr.owner = THIS_MODULE;
2176 bmc->device_id_attr.attr.mode = S_IRUGO;
2177 bmc->device_id_attr.show = device_id_show;
2179 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2180 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2181 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2182 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2184 bmc->revision_attr.attr.name = "revision";
2185 bmc->revision_attr.attr.owner = THIS_MODULE;
2186 bmc->revision_attr.attr.mode = S_IRUGO;
2187 bmc->revision_attr.show = revision_show;
2189 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2190 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2191 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2192 bmc->firmware_rev_attr.show = firmware_rev_show;
2194 bmc->version_attr.attr.name = "ipmi_version";
2195 bmc->version_attr.attr.owner = THIS_MODULE;
2196 bmc->version_attr.attr.mode = S_IRUGO;
2197 bmc->version_attr.show = ipmi_version_show;
2199 bmc->add_dev_support_attr.attr.name = "additional_device_support";
2200 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2201 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2202 bmc->add_dev_support_attr.show = add_dev_support_show;
2204 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2205 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2206 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2207 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2209 bmc->product_id_attr.attr.name = "product_id";
2210 bmc->product_id_attr.attr.owner = THIS_MODULE;
2211 bmc->product_id_attr.attr.mode = S_IRUGO;
2212 bmc->product_id_attr.show = product_id_show;
2214 bmc->guid_attr.attr.name = "guid";
2215 bmc->guid_attr.attr.owner = THIS_MODULE;
2216 bmc->guid_attr.attr.mode = S_IRUGO;
2217 bmc->guid_attr.show = guid_show;
2219 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2220 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2221 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2222 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
/* Create the files; each failure unwinds everything created so far. */
2224 err = device_create_file(&bmc->dev->dev,
2225 &bmc->device_id_attr);
2226 if (err) goto out;
2227 err = device_create_file(&bmc->dev->dev,
2228 &bmc->provides_dev_sdrs_attr);
2229 if (err) goto out_devid;
2230 err = device_create_file(&bmc->dev->dev,
2231 &bmc->revision_attr);
2232 if (err) goto out_sdrs;
2233 err = device_create_file(&bmc->dev->dev,
2234 &bmc->firmware_rev_attr);
2235 if (err) goto out_rev;
2236 err = device_create_file(&bmc->dev->dev,
2237 &bmc->version_attr);
2238 if (err) goto out_firm;
2239 err = device_create_file(&bmc->dev->dev,
2240 &bmc->add_dev_support_attr);
2241 if (err) goto out_version;
2242 err = device_create_file(&bmc->dev->dev,
2243 &bmc->manufacturer_id_attr);
2244 if (err) goto out_add_dev;
2245 err = device_create_file(&bmc->dev->dev,
2246 &bmc->product_id_attr);
2247 if (err) goto out_manu;
2248 if (bmc->id.aux_firmware_revision_set) {
2249 err = device_create_file(&bmc->dev->dev,
2250 &bmc->aux_firmware_rev_attr);
2251 if (err) goto out_prod_id;
2253 if (bmc->guid_set) {
2254 err = device_create_file(&bmc->dev->dev,
2255 &bmc->guid_attr);
2256 if (err) goto out_aux_firm;
2259 return 0;
/* Error unwind: remove files in reverse creation order. */
2261 out_aux_firm:
2262 if (bmc->id.aux_firmware_revision_set)
2263 device_remove_file(&bmc->dev->dev,
2264 &bmc->aux_firmware_rev_attr);
2265 out_prod_id:
2266 device_remove_file(&bmc->dev->dev,
2267 &bmc->product_id_attr);
2268 out_manu:
2269 device_remove_file(&bmc->dev->dev,
2270 &bmc->manufacturer_id_attr);
2271 out_add_dev:
2272 device_remove_file(&bmc->dev->dev,
2273 &bmc->add_dev_support_attr);
2274 out_version:
2275 device_remove_file(&bmc->dev->dev,
2276 &bmc->version_attr);
2277 out_firm:
2278 device_remove_file(&bmc->dev->dev,
2279 &bmc->firmware_rev_attr);
2280 out_rev:
2281 device_remove_file(&bmc->dev->dev,
2282 &bmc->revision_attr);
2283 out_sdrs:
2284 device_remove_file(&bmc->dev->dev,
2285 &bmc->provides_dev_sdrs_attr);
2286 out_devid:
2287 device_remove_file(&bmc->dev->dev,
2288 &bmc->device_id_attr);
2289 out:
2290 return err;
/*
 * Bind the interface's BMC to the driver model.  If a bmc_device with
 * the same GUID (or product/device id) already exists, the freshly
 * allocated intf->bmc is discarded and the existing one is shared via
 * a kref; otherwise a new platform device is registered, its sysfs
 * attribute files are created, and symlinks between the system
 * interface device and the BMC device are set up in both directions.
 * Returns 0 on success or a negative errno; on failure after
 * registration, ipmi_bmc_unregister() undoes the partial setup.
 */
static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
			     const char *sysfs_name)
	int               rv;
	struct bmc_device *bmc = intf->bmc;
	struct bmc_device *old_bmc;
	int               size;
	char              dummy[1];

	mutex_lock(&ipmidriver_mutex);

	/*
	 * Try to find if there is an bmc_device struct
	 * representing the interfaced BMC already
	 */
	if (bmc->guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
						    bmc->id.product_id,
						    bmc->id.device_id);

	/*
	 * If there is already an bmc_device, free the new one,
	 * otherwise register the new BMC device
	 */
	if (old_bmc) {
		kfree(bmc);
		intf->bmc = old_bmc;
		bmc = old_bmc;

		kref_get(&bmc->refcount);
		mutex_unlock(&ipmidriver_mutex);

		printk(KERN_INFO
		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
		       bmc->id.manufacturer_id,
		       bmc->id.product_id,
		       bmc->id.device_id);
	} else {
		char name[14];
		unsigned char orig_dev_id = bmc->id.device_id;
		int warn_printed = 0;

		snprintf(name, sizeof(name),
			 "ipmi_bmc.%4.4x", bmc->id.product_id);

		/* Some firmware exposes two BMCs with identical ids;
		   bump the device id until it is unique (or we wrap
		   back to the original and give up). */
		while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
						 bmc->id.product_id,
						 bmc->id.device_id)) {
			if (!warn_printed) {
				printk(KERN_WARNING PFX
				       "This machine has two different BMCs"
				       " with the same product id and device"
				       " id.  This is an error in the"
				       " firmware, but incrementing the"
				       " device id to work around the problem."
				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
				       bmc->id.product_id, bmc->id.device_id);
				warn_printed = 1;

			bmc->id.device_id++; /* Wraps at 255 */
			if (bmc->id.device_id == orig_dev_id) {
				printk(KERN_ERR PFX
				       "Out of device ids!\n");
				break;

		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
		if (!bmc->dev) {
			mutex_unlock(&ipmidriver_mutex);
			printk(KERN_ERR
			       "ipmi_msghandler:"
			       " Unable to allocate platform device\n");
			return -ENOMEM;

		bmc->dev->dev.driver = &ipmidriver;
		dev_set_drvdata(&bmc->dev->dev, bmc);
		kref_init(&bmc->refcount);

		rv = platform_device_add(bmc->dev);
		mutex_unlock(&ipmidriver_mutex);
		if (rv) {
			platform_device_put(bmc->dev);
			bmc->dev = NULL;
			printk(KERN_ERR
			       "ipmi_msghandler:"
			       " Unable to register bmc device: %d\n",
			       rv);
			/* Don't go to out_err, you can only do that if
			   the device is registered already. */
			return rv;

		rv = create_files(bmc);
		if (rv) {
			mutex_lock(&ipmidriver_mutex);
			platform_device_unregister(bmc->dev);
			mutex_unlock(&ipmidriver_mutex);

			return rv;

		printk(KERN_INFO
		       "ipmi: Found new BMC (man_id: 0x%6.6x, "
		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
		       bmc->id.manufacturer_id,
		       bmc->id.product_id,
		       bmc->id.device_id);

	/*
	 * create symlink from system interface device to bmc device
	 * and back.
	 */
	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
	if (!intf->sysfs_name) {
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: allocate link to BMC: %d\n",
		       rv);
		goto out_err;

	rv = sysfs_create_link(&intf->si_dev->kobj,
			       &bmc->dev->dev.kobj, intf->sysfs_name);
	if (rv) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		printk(KERN_ERR
		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
		       rv);
		goto out_err;

	/* Size the back-link name with a zero-length snprintf probe,
	   then allocate and format it for real. */
	size = snprintf(dummy, 0, "ipmi%d", ifnum);
	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
	if (!intf->my_dev_name) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: allocate link from BMC: %d\n",
		       rv);
		goto out_err;

	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);

	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		kfree(intf->sysfs_name);
		intf->sysfs_name = NULL;
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
		printk(KERN_ERR
		       "ipmi_msghandler:"
		       " Unable to create symlink to bmc: %d\n",
		       rv);
		goto out_err;

	return 0;

 out_err:
	ipmi_bmc_unregister(intf);
	return rv;
/*
 * Send a Get Device GUID command to the BMC over the system
 * interface.  No user is supplied to i_ipmi_request(), so the reply
 * is routed to intf->null_user_handler (set to guid_handler by the
 * caller).  Returns whatever i_ipmi_request() returns.
 *
 * NOTE(review): the "chan" parameter is unused in this body; the
 * message always goes to IPMI_BMC_CHANNEL — confirm intent.
 */
static int
send_guid_cmd(ipmi_smi_t intf, int chan)
	struct kernel_ipmi_msg            msg;
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      intf->channels[0].address,
			      intf->channels[0].lun,
			      -1, 0);
/*
 * Null-user handler for the Get Device GUID response.  Validates that
 * the message really is the GUID response, records the 16-byte GUID
 * in intf->bmc (or clears guid_set on error/short reply), and wakes
 * up get_guid(), which is blocked on intf->waitq.
 */
static void
guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
		/* Not for me */
		return;

	if (msg->msg.data[0] != 0) {
		/* Error from getting the GUID, the BMC doesn't have one. */
		intf->bmc->guid_set = 0;
		goto out;

	/* Need completion code + 16 GUID bytes. */
	if (msg->msg.data_len < 17) {
		intf->bmc->guid_set = 0;
		printk(KERN_WARNING PFX
		       "guid_handler: The GUID response from the BMC was too"
		       " short, it was %d but should have been 17.  Assuming"
		       " GUID is not available.\n",
		       msg->msg.data_len);
		goto out;

	memcpy(intf->bmc->guid, msg->msg.data, 16);
	intf->bmc->guid_set = 1;
 out:
	wake_up(&intf->waitq);
/*
 * Synchronously fetch the BMC's GUID.  guid_set is used as a tiny
 * state machine: 2 means "request in flight", and guid_handler()
 * changes it to 0 (no GUID) or 1 (GUID stored) before waking us.
 */
static void
get_guid(ipmi_smi_t intf)
	int rv;

	intf->bmc->guid_set = 0x2;
	intf->null_user_handler = guid_handler;
	rv = send_guid_cmd(intf, 0);
	if (rv)
		/* Send failed, no GUID available. */
		intf->bmc->guid_set = 0;
	wait_event(intf->waitq, intf->bmc->guid_set != 2);
	intf->null_user_handler = NULL;
/*
 * Send a Get Channel Info command for channel "chan" to the BMC over
 * the system interface.  No user is supplied, so the reply goes to
 * intf->null_user_handler (channel_handler during the startup scan).
 * Returns whatever i_ipmi_request() returns.
 */
static int
send_channel_info_cmd(ipmi_smi_t intf, int chan)
	struct kernel_ipmi_msg            msg;
	unsigned char                     data[1];
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
	msg.data = data;
	msg.data_len = 1;
	data[0] = chan;		/* the channel being queried */
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      intf->channels[0].address,
			      intf->channels[0].lun,
			      -1, 0);
/*
 * Null-user handler driving the channel scan started by
 * ipmi_register_smi(): each Get Channel Info response is recorded in
 * intf->channels[], then the next channel is queried.  When all
 * channels are scanned (curr_channel reaches IPMI_MAX_CHANNELS) or an
 * unrecoverable error occurs, the waiter on intf->waitq is woken.
 */
static void
channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
	int rv = 0;
	int chan;

	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */

			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/* If the MC does not support this
				   command, that is legal.  We just
				   assume it has one IPMB at channel
				   zero. */
				intf->channels[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->channels[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;
				rv = -ENOSYS;

				/* Mark the scan finished and wake the
				   waiter in ipmi_register_smi(). */
				intf->curr_channel = IPMI_MAX_CHANNELS;
				wake_up(&intf->waitq);
				goto out;

			goto next_channel;

		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
			goto next_channel;

		/* Record medium/protocol for the channel just queried. */
		chan = intf->curr_channel;
		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;

	next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
			wake_up(&intf->waitq);
		else
			rv = send_channel_info_cmd(intf, intf->curr_channel);

		if (rv) {
			/* Got an error somehow, just give up. */
			intf->curr_channel = IPMI_MAX_CHANNELS;
			wake_up(&intf->waitq);

			printk(KERN_WARNING PFX
			       "Error sending channel information: %d\n",
			       rv);

 out:
	return;
/*
 * Register a new system-management interface with the message
 * handler.  Allocates and initializes the per-interface structure,
 * inserts it into ipmi_interfaces at the first free interface number,
 * starts the lower layer, fetches the BMC GUID, scans the channels
 * (IPMI >= 1.5 only), creates the /proc entries, and registers the
 * BMC device.  Returns 0 on success or a negative errno, in which
 * case everything done so far is torn down.
 */
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
		      void		       *send_info,
		      struct ipmi_device_id    *device_id,
		      struct device            *si_dev,
		      const char               *sysfs_name,
		      unsigned char            slave_addr)
	int              i, j;
	int              rv;
	ipmi_smi_t       intf;
	ipmi_smi_t       tintf;
	struct list_head *link;

	/* Make sure the driver is actually initialized, this handles
	   problems with initialization order. */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;
		/* The init code doesn't return an error if it was turned
		   off, but it won't initialize.  Check that. */
		if (!initialized)
			return -ENODEV;

	intf = kmalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;
	memset(intf, 0, sizeof(*intf));
	intf->ipmi_version_major = ipmi_version_major(device_id);
	intf->ipmi_version_minor = ipmi_version_minor(device_id);
	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
	if (!intf->bmc) {
		kfree(intf);
		return -ENOMEM;
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	intf->bmc->id = *device_id;
	intf->si_dev = si_dev;
	/* Default every channel to the BMC slave address, lun 2. */
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->channels[j].lun = 2;
	if (slave_addr != 0)
		intf->channels[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->users);
	intf->handlers = handlers;
	intf->send_info = send_info;
	spin_lock_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	intf->curr_seq = 0;
#ifdef CONFIG_PROC_FS
	spin_lock_init(&intf->proc_entry_lock);
#endif
	spin_lock_init(&intf->waiting_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_msgs);
	spin_lock_init(&intf->events_lock);
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);

	spin_lock_init(&intf->counter_lock);
	intf->proc_dir = NULL;

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		i++;
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add_rcu(&intf->link, &ipmi_interfaces);
	else
		list_add_tail_rcu(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out;

	get_guid(intf);

	if ((intf->ipmi_version_major > 1)
	    || ((intf->ipmi_version_major == 1)
	        && (intf->ipmi_version_minor >= 5)))
		/* Start scanning the channels to see what is
		   available. */
		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		if (rv)
			goto out;

		/* Wait for the channel info to be read. */
		wait_event(intf->waitq,
			   intf->curr_channel >= IPMI_MAX_CHANNELS);
		intf->null_user_handler = NULL;
	} else {
		/* Assume a single IPMB channel at zero. */
		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;

	if (rv == 0)
		rv = add_proc_entries(intf, i);

	/* NOTE(review): this unconditionally overwrites rv, so an
	   error from add_proc_entries() above is silently lost here —
	   looks unintentional, confirm against upstream history. */
	rv = ipmi_bmc_register(intf, i, sysfs_name);

 out:
	if (rv) {
		if (intf->proc_dir)
			remove_proc_entries(intf);
		intf->handlers = NULL;
		list_del_rcu(&intf->link);
		mutex_unlock(&ipmi_interfaces_mutex);
		mutex_unlock(&smi_watchers_mutex);
		synchronize_rcu();
		kref_put(&intf->refcount, intf_free);
	} else {
		/* After this point the interface is legal to use. */
		intf->intf_num = i;
		mutex_unlock(&ipmi_interfaces_mutex);
		call_smi_watchers(i, intf->si_dev);
		mutex_unlock(&smi_watchers_mutex);

	return rv;
/*
 * Fail every in-flight message in the sequence table with
 * IPMI_ERR_UNSPECIFIED so waiting users get a response.  Called from
 * ipmi_unregister_smi() after the interface has been removed from
 * the interface list.
 */
static void cleanup_smi_msgs(ipmi_smi_t intf)
	int              i;
	struct seq_table *ent;

	/* No need for locks, the interface is down. */
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		ent = &(intf->seq_table[i]);
		if (!ent->inuse)
			continue;
		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
/*
 * Tear down an interface previously set up by ipmi_register_smi():
 * unregister the BMC device, remove the interface from the list (with
 * an RCU grace period before touching its contents), fail pending
 * messages, remove /proc entries, notify all watchers, and drop the
 * registration reference.  Always returns 0.
 */
int ipmi_unregister_smi(ipmi_smi_t intf)
	struct ipmi_smi_watcher *w;
	int    intf_num = intf->intf_num;

	ipmi_bmc_unregister(intf);

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	intf->intf_num = -1;
	intf->handlers = NULL;
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	/* Wait for RCU readers before cleaning up message state. */
	synchronize_rcu();

	cleanup_smi_msgs(intf);

	remove_proc_entries(intf);

	/* Call all the watcher interfaces to tell them that
	   an interface is gone. */
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	kref_put(&intf->refcount, intf_free);
	return 0;
/*
 * Handle a response received over IPMB (via a Get Message command).
 * Matches the response to its pending request by sequence number and
 * delivers it to the owning user.  Returns 0 in all cases (the SMI
 * message can always be freed).
 */
static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
				   struct ipmi_smi_msg *msg)
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg  *recv_msg;
	unsigned long         flags;

	/* This is 11, not 10, because the response must contain a
	 * completion code. */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_ipmb_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;

	/* Rebuild the remote responder's address from the IPMB header. */
	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[8],
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &(ipmb_addr),
			  &recv_msg))
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_ipmb_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;

	memcpy(recv_msg->msg_data,
	       &(msg->rsp[9]),
	       msg->rsp_size - 9);
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	spin_lock_irqsave(&intf->counter_lock, flags);
	intf->handled_ipmb_responses++;
	spin_unlock_irqrestore(&intf->counter_lock, flags);
	deliver_response(recv_msg);

	return 0;
/*
 * Handle a command addressed to us arriving over IPMB.  If a user has
 * registered for this netfn/cmd/channel, the command is delivered to
 * it; otherwise an "invalid command" response is sent back on the
 * bus.  Return values: 0 = free the SMI message, 1 = requeue it
 * (allocation failure), -1 = the message was reused for the error
 * response and must be neither freed nor requeued.
 */
static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
				   struct ipmi_smi_msg *msg)
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	ipmi_user_t              user = NULL;
	struct ipmi_ipmb_addr    *ipmb_addr;
	struct ipmi_recv_msg     *recv_msg;
	unsigned long            flags;
	struct ipmi_smi_handlers *handlers;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;

	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];
	chan = msg->rsp[3] & 0xf;

	/* Look up the registered receiver under RCU and take a
	   reference on its user before dropping the read lock. */
	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		/* Build a Send Message wrapping an "invalid command"
		   completion back to the requester, reusing msg. */
		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
		msg->data_size = 11;

#ifdef DEBUG_MSGING
		int m;
		printk("Invalid command:");
		for (m = 0; m < msg->data_size; m++)
			printk(" %2.2x", msg->data[m]);
		printk("\n");
#endif
		rcu_read_lock();
		handlers = intf->handlers;
		if (handlers) {
			handlers->sender(intf->send_info, msg, 0);
			/* We used the message, so return the value
			   that causes it to not be freed or
			   queued. */
			rv = -1;

		rcu_read_unlock();
	} else {
		/* Deliver the message to the user. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;

			/* Extract the rest of the message information
			   from the IPMB header.*/
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;

			/* We chop off 10, not 9 bytes because the checksum
			   at the end also needs to be removed. */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[9]),
			       msg->rsp_size - 10);
			deliver_response(recv_msg);

	return rv;
/*
 * Handle a response received over a LAN channel (via a Get Message
 * command).  Mirrors handle_ipmb_get_msg_rsp() but with the LAN
 * header layout: matches the response to its pending request by
 * sequence number and delivers it.  Always returns 0 (the SMI
 * message can be freed).
 */
static int handle_lan_get_msg_rsp(ipmi_smi_t          intf,
				  struct ipmi_smi_msg *msg)
	struct ipmi_lan_addr  lan_addr;
	struct ipmi_recv_msg  *recv_msg;
	unsigned long         flags;

	/* This is 13, not 12, because the response must contain a
	 * completion code. */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_lan_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;

	/* Rebuild the remote responder's LAN address from the header. */
	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &(lan_addr),
			  &recv_msg))
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_lan_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;

	memcpy(recv_msg->msg_data,
	       &(msg->rsp[11]),
	       msg->rsp_size - 11);
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	spin_lock_irqsave(&intf->counter_lock, flags);
	intf->handled_lan_responses++;
	spin_unlock_irqrestore(&intf->counter_lock, flags);
	deliver_response(recv_msg);

	return 0;
/*
 * Handle a command addressed to us arriving over a LAN channel.  If a
 * user has registered for this netfn/cmd/channel, deliver the command
 * to it; unlike the IPMB path, unmatched LAN commands are simply
 * dropped (no error response is generated).  Return values: 0 = free
 * the SMI message, 1 = requeue it (allocation failure).
 */
static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
				  struct ipmi_smi_msg *msg)
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	ipmi_user_t              user = NULL;
	struct ipmi_lan_addr     *lan_addr;
	struct ipmi_recv_msg     *recv_msg;
	unsigned long            flags;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	/* Look up the registered receiver under RCU and take a
	   reference on its user before dropping the read lock. */
	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		rv = 0; /* Don't do anything with these messages, just
			   allow them to be freed. */
	} else {
		/* Deliver the message to the user. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/* Extract the rest of the message information
			   from the IPMB header.*/
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/* We chop off 12, not 11 bytes because the checksum
			   at the end also needs to be removed. */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[11]),
			       msg->rsp_size - 12);
			deliver_response(recv_msg);

	return rv;
/*
 * Fill a receive message with the contents of an asynchronous event:
 * a system-interface source address, IPMI_ASYNC_EVENT_RECV_TYPE, and
 * the event payload (the SMI response minus the 3 header bytes).
 */
static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg  *msg)
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle the response to a Read Event Message Buffer command (an
 * asynchronous event).  A copy is delivered to every user that has
 * events enabled; if no one is listening, the event is queued on
 * intf->waiting_events (bounded by MAX_EVENTS_IN_QUEUE) or discarded.
 * Returns 0 if the SMI message can be freed, 1 to requeue it after an
 * allocation failure.
 */
static int handle_read_event_rsp(ipmi_smi_t          intf,
				 struct ipmi_smi_msg *msg)
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head     msgs;
	ipmi_user_t          user;
	int                  rv = 0;
	int                  deliver_count = 0;
	unsigned long        flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_events++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	spin_lock(&intf->counter_lock);
	intf->events++;
	spin_unlock(&intf->counter_lock);

	/* Allocate and fill in one message for every user that is getting
	   events. */
	rcu_read_lock();
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* Roll back: free everything collected so far. */
			rcu_read_unlock();
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);

			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&(recv_msg->link), &msgs);

	rcu_read_unlock();

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_response(recv_msg);

	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/* No one to receive the message, put it in queue if there's
		   not already too many things in the queue. */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
		intf->waiting_events_count++;
	} else {
		/* There's too many things in the queue, discard this
		   message. */
		printk(KERN_WARNING PFX "Event queue full, discarding an"
		       " incoming event\n");

 out:
	spin_unlock_irqrestore(&(intf->events_lock), flags);

	return rv;
/*
 * Handle a response from the local BMC (system interface).  The
 * original request's recv_msg was stashed in msg->user_data; fill it
 * with the response and deliver it, unless the owning user has gone
 * away.  Always returns 0 (the SMI message can be freed).
 */
static int handle_bmc_rsp(ipmi_smi_t          intf,
			  struct ipmi_smi_msg *msg)
	struct ipmi_recv_msg *recv_msg;
	unsigned long        flags;
	struct ipmi_user     *user;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL)
		printk(KERN_WARNING"IPMI message received with no owner. This\n"
		       "could be because of a malformed message, or\n"
		       "because of a hardware error.  Contact your\n"
		       "hardware vender for assistance\n");
		return 0;

	user = recv_msg->user;
	/* Make sure the user still exists. */
	if (user && !user->valid) {
		/* The user for the message went away, so give up. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_local_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		ipmi_free_recv_msg(recv_msg);
	} else {
		struct ipmi_system_interface_addr *smi_addr;

		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_local_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv_msg->msgid = msg->msgid;
		smi_addr = ((struct ipmi_system_interface_addr *)
			    &(recv_msg->addr));
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		memcpy(recv_msg->msg_data,
		       &(msg->rsp[2]),
		       msg->rsp_size - 2);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = msg->rsp_size - 2;
		deliver_response(recv_msg);

	return 0;
/* Handle a new message.  Return 1 if the message should be requeued,
   0 if the message should be freed, or -1 if the message should not
   be freed or requeued.

   Classifies an SMI response by its netfn/cmd: a Send Message
   response-to-a-response, a Get Message receive-queue message
   (dispatched by channel medium to the IPMB or LAN handlers), a Read
   Event Message Buffer async event, or a plain local BMC response.
   Malformed/mismatched responses are rewritten into an
   IPMI_ERR_UNSPECIFIED error response first. */
static int handle_new_recv_msg(ipmi_smi_t          intf,
			       struct ipmi_smi_msg *msg)
	int requeue;
	int chan;

#ifdef DEBUG_MSGING
	int m;
	printk("Recv:");
	for (m = 0; m < msg->rsp_size; m++)
		printk(" %2.2x", msg->rsp[m]);
	printk("\n");
#endif
	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		printk(KERN_WARNING PFX "BMC returned to small a message"
		       " for netfn %x cmd %x, got %d bytes\n",
		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
		   || (msg->rsp[1] != msg->data[1]))		  /* Command */
		/* The response is not even marginally correct. */
		printk(KERN_WARNING PFX "BMC returned incorrect response,"
		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
		       (msg->data[0] >> 2) | 1, msg->data[1],
		       msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;

	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL))
		/* It's a response to a response we sent.  For this we
		   deliver a send message response to the user. */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		if (!recv_msg)
			goto out;

		/* Make sure the user still exists. */
		if (!recv_msg->user || !recv_msg->user->valid)
			goto out;

		/* Deliver just the completion code of the send. */
		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_response(recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD))
		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;

		/* Dispatch on the medium of the channel the message
		   came in on. */
		switch (intf->channels[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/* It's a response, so find the
				   requesting message and send it up. */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/* It's a command to the SMS from some other
				   entity.  Handle that. */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);

			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/* It's a response, so find the
				   requesting message and send it up. */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/* It's a command to the SMS from some other
				   entity.  Handle that. */
				requeue = handle_lan_get_msg_cmd(intf, msg);

			break;

		default:
			/* We don't handle the channel type, so just
			 * free the message. */
			requeue = 0;

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
		/* It's an asyncronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);

 out:
	return requeue;
3457 /* Handle a new message from the lower layer. */
3458 void ipmi_smi_msg_received(ipmi_smi_t intf,
3459 struct ipmi_smi_msg *msg)
3461 unsigned long flags;
3462 int rv;
3465 if ((msg->data_size >= 2)
3466 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3467 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3468 && (msg->user_data == NULL))
3470 /* This is the local response to a command send, start
3471 the timer for these. The user_data will not be
3472 NULL if this is a response send, and we will let
3473 response sends just go through. */
3475 /* Check for errors, if we get certain errors (ones
3476 that mean basically we can try again later), we
3477 ignore them and start the timer. Otherwise we
3478 report the error immediately. */
3479 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3480 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3481 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3482 && (msg->rsp[2] != IPMI_BUS_ERR)
3483 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
3485 int chan = msg->rsp[3] & 0xf;
3487 /* Got an error sending the message, handle it. */
3488 spin_lock_irqsave(&intf->counter_lock, flags);
3489 if (chan >= IPMI_MAX_CHANNELS)
3490 ; /* This shouldn't happen */
3491 else if ((intf->channels[chan].medium
3492 == IPMI_CHANNEL_MEDIUM_8023LAN)
3493 || (intf->channels[chan].medium
3494 == IPMI_CHANNEL_MEDIUM_ASYNC))
3495 intf->sent_lan_command_errs++;
3496 else
3497 intf->sent_ipmb_command_errs++;
3498 spin_unlock_irqrestore(&intf->counter_lock, flags);
3499 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3500 } else {
3501 /* The message was sent, start the timer. */
3502 intf_start_seq_timer(intf, msg->msgid);
3505 ipmi_free_smi_msg(msg);
3506 goto out;
3509 /* To preserve message order, if the list is not empty, we
3510 tack this message onto the end of the list. */
3511 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3512 if (!list_empty(&intf->waiting_msgs)) {
3513 list_add_tail(&msg->link, &intf->waiting_msgs);
3514 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3515 goto out;
3517 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3519 rv = handle_new_recv_msg(intf, msg);
3520 if (rv > 0) {
3521 /* Could not handle the message now, just add it to a
3522 list to handle later. */
3523 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3524 list_add_tail(&msg->link, &intf->waiting_msgs);
3525 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3526 } else if (rv == 0) {
3527 ipmi_free_smi_msg(msg);
3530 out:
3531 return;
3534 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3536 ipmi_user_t user;
3538 rcu_read_lock();
3539 list_for_each_entry_rcu(user, &intf->users, link) {
3540 if (!user->handler->ipmi_watchdog_pretimeout)
3541 continue;
3543 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3545 rcu_read_unlock();
3549 static struct ipmi_smi_msg *
3550 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3551 unsigned char seq, long seqid)
3553 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3554 if (!smi_msg)
3555 /* If we can't allocate the message, then just return, we
3556 get 4 retries, so this should be ok. */
3557 return NULL;
3559 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3560 smi_msg->data_size = recv_msg->msg.data_len;
3561 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3563 #ifdef DEBUG_MSGING
3565 int m;
3566 printk("Resend: ");
3567 for (m = 0; m < smi_msg->data_size; m++)
3568 printk(" %2.2x", smi_msg->data[m]);
3569 printk("\n");
3571 #endif
3572 return smi_msg;
3575 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3576 struct list_head *timeouts, long timeout_period,
3577 int slot, unsigned long *flags)
3579 struct ipmi_recv_msg *msg;
3580 struct ipmi_smi_handlers *handlers;
3582 if (intf->intf_num == -1)
3583 return;
3585 if (!ent->inuse)
3586 return;
3588 ent->timeout -= timeout_period;
3589 if (ent->timeout > 0)
3590 return;
3592 if (ent->retries_left == 0) {
3593 /* The message has used all its retries. */
3594 ent->inuse = 0;
3595 msg = ent->recv_msg;
3596 list_add_tail(&msg->link, timeouts);
3597 spin_lock(&intf->counter_lock);
3598 if (ent->broadcast)
3599 intf->timed_out_ipmb_broadcasts++;
3600 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3601 intf->timed_out_lan_commands++;
3602 else
3603 intf->timed_out_ipmb_commands++;
3604 spin_unlock(&intf->counter_lock);
3605 } else {
3606 struct ipmi_smi_msg *smi_msg;
3607 /* More retries, send again. */
3609 /* Start with the max timer, set to normal
3610 timer after the message is sent. */
3611 ent->timeout = MAX_MSG_TIMEOUT;
3612 ent->retries_left--;
3613 spin_lock(&intf->counter_lock);
3614 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3615 intf->retransmitted_lan_commands++;
3616 else
3617 intf->retransmitted_ipmb_commands++;
3618 spin_unlock(&intf->counter_lock);
3620 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3621 ent->seqid);
3622 if (!smi_msg)
3623 return;
3625 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3627 /* Send the new message. We send with a zero
3628 * priority. It timed out, I doubt time is
3629 * that critical now, and high priority
3630 * messages are really only for messages to the
3631 * local MC, which don't get resent. */
3632 handlers = intf->handlers;
3633 if (handlers)
3634 intf->handlers->sender(intf->send_info,
3635 smi_msg, 0);
3636 else
3637 ipmi_free_smi_msg(smi_msg);
3639 spin_lock_irqsave(&intf->seq_lock, *flags);
3643 static void ipmi_timeout_handler(long timeout_period)
3645 ipmi_smi_t intf;
3646 struct list_head timeouts;
3647 struct ipmi_recv_msg *msg, *msg2;
3648 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3649 unsigned long flags;
3650 int i;
3652 rcu_read_lock();
3653 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3654 /* See if any waiting messages need to be processed. */
3655 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3656 list_for_each_entry_safe(smi_msg, smi_msg2,
3657 &intf->waiting_msgs, link) {
3658 if (!handle_new_recv_msg(intf, smi_msg)) {
3659 list_del(&smi_msg->link);
3660 ipmi_free_smi_msg(smi_msg);
3661 } else {
3662 /* To preserve message order, quit if we
3663 can't handle a message. */
3664 break;
3667 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3669 /* Go through the seq table and find any messages that
3670 have timed out, putting them in the timeouts
3671 list. */
3672 INIT_LIST_HEAD(&timeouts);
3673 spin_lock_irqsave(&intf->seq_lock, flags);
3674 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
3675 check_msg_timeout(intf, &(intf->seq_table[i]),
3676 &timeouts, timeout_period, i,
3677 &flags);
3678 spin_unlock_irqrestore(&intf->seq_lock, flags);
3680 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3681 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
3684 * Maintenance mode handling. Check the timeout
3685 * optimistically before we claim the lock. It may
3686 * mean a timeout gets missed occasionally, but that
3687 * only means the timeout gets extended by one period
3688 * in that case. No big deal, and it avoids the lock
3689 * most of the time.
3691 if (intf->auto_maintenance_timeout > 0) {
3692 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
3693 if (intf->auto_maintenance_timeout > 0) {
3694 intf->auto_maintenance_timeout
3695 -= timeout_period;
3696 if (!intf->maintenance_mode
3697 && (intf->auto_maintenance_timeout <= 0))
3699 intf->maintenance_mode_enable = 0;
3700 maintenance_mode_update(intf);
3703 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
3704 flags);
3707 rcu_read_unlock();
3710 static void ipmi_request_event(void)
3712 ipmi_smi_t intf;
3713 struct ipmi_smi_handlers *handlers;
3715 rcu_read_lock();
3716 /* Called from the timer, no need to check if handlers is
3717 * valid. */
3718 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3719 /* No event requests when in maintenance mode. */
3720 if (intf->maintenance_mode_enable)
3721 continue;
3723 handlers = intf->handlers;
3724 if (handlers)
3725 handlers->request_events(intf->send_info);
3727 rcu_read_unlock();
3730 static struct timer_list ipmi_timer;
3732 /* Call every ~100 ms. */
3733 #define IPMI_TIMEOUT_TIME 100
3735 /* How many jiffies does it take to get to the timeout time. */
3736 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3738 /* Request events from the queue every second (this is the number of
3739 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3740 future, IPMI will add a way to know immediately if an event is in
3741 the queue and this silliness can go away. */
3742 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
3744 static atomic_t stop_operation;
3745 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3747 static void ipmi_timeout(unsigned long data)
3749 if (atomic_read(&stop_operation))
3750 return;
3752 ticks_to_req_ev--;
3753 if (ticks_to_req_ev == 0) {
3754 ipmi_request_event();
3755 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3758 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3760 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3764 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3765 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3767 /* FIXME - convert these to slabs. */
3768 static void free_smi_msg(struct ipmi_smi_msg *msg)
3770 atomic_dec(&smi_msg_inuse_count);
3771 kfree(msg);
3774 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3776 struct ipmi_smi_msg *rv;
3777 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3778 if (rv) {
3779 rv->done = free_smi_msg;
3780 rv->user_data = NULL;
3781 atomic_inc(&smi_msg_inuse_count);
3783 return rv;
3786 static void free_recv_msg(struct ipmi_recv_msg *msg)
3788 atomic_dec(&recv_msg_inuse_count);
3789 kfree(msg);
3792 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3794 struct ipmi_recv_msg *rv;
3796 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3797 if (rv) {
3798 rv->user = NULL;
3799 rv->done = free_recv_msg;
3800 atomic_inc(&recv_msg_inuse_count);
3802 return rv;
3805 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3807 if (msg->user)
3808 kref_put(&msg->user->refcount, free_user);
3809 msg->done(msg);
3812 #ifdef CONFIG_IPMI_PANIC_EVENT
3814 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3818 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3822 #ifdef CONFIG_IPMI_PANIC_STRING
3823 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3825 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3826 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3827 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3828 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3830 /* A get event receiver command, save it. */
3831 intf->event_receiver = msg->msg.data[1];
3832 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
3836 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3838 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3839 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3840 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3841 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3843 /* A get device id command, save if we are an event
3844 receiver or generator. */
3845 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3846 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
3849 #endif
3851 static void send_panic_events(char *str)
3853 struct kernel_ipmi_msg msg;
3854 ipmi_smi_t intf;
3855 unsigned char data[16];
3856 struct ipmi_system_interface_addr *si;
3857 struct ipmi_addr addr;
3858 struct ipmi_smi_msg smi_msg;
3859 struct ipmi_recv_msg recv_msg;
3861 si = (struct ipmi_system_interface_addr *) &addr;
3862 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3863 si->channel = IPMI_BMC_CHANNEL;
3864 si->lun = 0;
3866 /* Fill in an event telling that we have failed. */
3867 msg.netfn = 0x04; /* Sensor or Event. */
3868 msg.cmd = 2; /* Platform event command. */
3869 msg.data = data;
3870 msg.data_len = 8;
3871 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3872 data[1] = 0x03; /* This is for IPMI 1.0. */
3873 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3874 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3875 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3877 /* Put a few breadcrumbs in. Hopefully later we can add more things
3878 to make the panic events more useful. */
3879 if (str) {
3880 data[3] = str[0];
3881 data[6] = str[1];
3882 data[7] = str[2];
3885 smi_msg.done = dummy_smi_done_handler;
3886 recv_msg.done = dummy_recv_done_handler;
3888 /* For every registered interface, send the event. */
3889 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3890 if (!intf->handlers)
3891 /* Interface is not ready. */
3892 continue;
3894 /* Send the event announcing the panic. */
3895 intf->handlers->set_run_to_completion(intf->send_info, 1);
3896 i_ipmi_request(NULL,
3897 intf,
3898 &addr,
3900 &msg,
3901 intf,
3902 &smi_msg,
3903 &recv_msg,
3905 intf->channels[0].address,
3906 intf->channels[0].lun,
3907 0, 1); /* Don't retry, and don't wait. */
3910 #ifdef CONFIG_IPMI_PANIC_STRING
3911 /* On every interface, dump a bunch of OEM event holding the
3912 string. */
3913 if (!str)
3914 return;
3916 /* For every registered interface, send the event. */
3917 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3918 char *p = str;
3919 struct ipmi_ipmb_addr *ipmb;
3920 int j;
3922 if (intf->intf_num == -1)
3923 /* Interface was not ready yet. */
3924 continue;
3926 /* First job here is to figure out where to send the
3927 OEM events. There's no way in IPMI to send OEM
3928 events using an event send command, so we have to
3929 find the SEL to put them in and stick them in
3930 there. */
3932 /* Get capabilities from the get device id. */
3933 intf->local_sel_device = 0;
3934 intf->local_event_generator = 0;
3935 intf->event_receiver = 0;
3937 /* Request the device info from the local MC. */
3938 msg.netfn = IPMI_NETFN_APP_REQUEST;
3939 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3940 msg.data = NULL;
3941 msg.data_len = 0;
3942 intf->null_user_handler = device_id_fetcher;
3943 i_ipmi_request(NULL,
3944 intf,
3945 &addr,
3947 &msg,
3948 intf,
3949 &smi_msg,
3950 &recv_msg,
3952 intf->channels[0].address,
3953 intf->channels[0].lun,
3954 0, 1); /* Don't retry, and don't wait. */
3956 if (intf->local_event_generator) {
3957 /* Request the event receiver from the local MC. */
3958 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3959 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3960 msg.data = NULL;
3961 msg.data_len = 0;
3962 intf->null_user_handler = event_receiver_fetcher;
3963 i_ipmi_request(NULL,
3964 intf,
3965 &addr,
3967 &msg,
3968 intf,
3969 &smi_msg,
3970 &recv_msg,
3972 intf->channels[0].address,
3973 intf->channels[0].lun,
3974 0, 1); /* no retry, and no wait. */
3976 intf->null_user_handler = NULL;
3978 /* Validate the event receiver. The low bit must not
3979 be 1 (it must be a valid IPMB address), it cannot
3980 be zero, and it must not be my address. */
3981 if (((intf->event_receiver & 1) == 0)
3982 && (intf->event_receiver != 0)
3983 && (intf->event_receiver != intf->channels[0].address))
3985 /* The event receiver is valid, send an IPMB
3986 message. */
3987 ipmb = (struct ipmi_ipmb_addr *) &addr;
3988 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3989 ipmb->channel = 0; /* FIXME - is this right? */
3990 ipmb->lun = intf->event_receiver_lun;
3991 ipmb->slave_addr = intf->event_receiver;
3992 } else if (intf->local_sel_device) {
3993 /* The event receiver was not valid (or was
3994 me), but I am an SEL device, just dump it
3995 in my SEL. */
3996 si = (struct ipmi_system_interface_addr *) &addr;
3997 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3998 si->channel = IPMI_BMC_CHANNEL;
3999 si->lun = 0;
4000 } else
4001 continue; /* No where to send the event. */
4004 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4005 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4006 msg.data = data;
4007 msg.data_len = 16;
4009 j = 0;
4010 while (*p) {
4011 int size = strlen(p);
4013 if (size > 11)
4014 size = 11;
4015 data[0] = 0;
4016 data[1] = 0;
4017 data[2] = 0xf0; /* OEM event without timestamp. */
4018 data[3] = intf->channels[0].address;
4019 data[4] = j++; /* sequence # */
4020 /* Always give 11 bytes, so strncpy will fill
4021 it with zeroes for me. */
4022 strncpy(data+5, p, 11);
4023 p += size;
4025 i_ipmi_request(NULL,
4026 intf,
4027 &addr,
4029 &msg,
4030 intf,
4031 &smi_msg,
4032 &recv_msg,
4034 intf->channels[0].address,
4035 intf->channels[0].lun,
4036 0, 1); /* no retry, and no wait. */
4039 #endif /* CONFIG_IPMI_PANIC_STRING */
4041 #endif /* CONFIG_IPMI_PANIC_EVENT */
4043 static int has_panicked;
4045 static int panic_event(struct notifier_block *this,
4046 unsigned long event,
4047 void *ptr)
4049 ipmi_smi_t intf;
4051 if (has_panicked)
4052 return NOTIFY_DONE;
4053 has_panicked = 1;
4055 /* For every registered interface, set it to run to completion. */
4056 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4057 if (!intf->handlers)
4058 /* Interface is not ready. */
4059 continue;
4061 intf->handlers->set_run_to_completion(intf->send_info, 1);
4064 #ifdef CONFIG_IPMI_PANIC_EVENT
4065 send_panic_events(ptr);
4066 #endif
4068 return NOTIFY_DONE;
4071 static struct notifier_block panic_block = {
4072 .notifier_call = panic_event,
4073 .next = NULL,
4074 .priority = 200 /* priority: INT_MAX >= x >= 0 */
4077 static int ipmi_init_msghandler(void)
4079 int rv;
4081 if (initialized)
4082 return 0;
4084 rv = driver_register(&ipmidriver);
4085 if (rv) {
4086 printk(KERN_ERR PFX "Could not register IPMI driver\n");
4087 return rv;
4090 printk(KERN_INFO "ipmi message handler version "
4091 IPMI_DRIVER_VERSION "\n");
4093 #ifdef CONFIG_PROC_FS
4094 proc_ipmi_root = proc_mkdir("ipmi", NULL);
4095 if (!proc_ipmi_root) {
4096 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
4097 return -ENOMEM;
4100 proc_ipmi_root->owner = THIS_MODULE;
4101 #endif /* CONFIG_PROC_FS */
4103 setup_timer(&ipmi_timer, ipmi_timeout, 0);
4104 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4106 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
4108 initialized = 1;
4110 return 0;
4113 static __init int ipmi_init_msghandler_mod(void)
4115 ipmi_init_msghandler();
4116 return 0;
4119 static __exit void cleanup_ipmi(void)
4121 int count;
4123 if (!initialized)
4124 return;
4126 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4128 /* This can't be called if any interfaces exist, so no worry about
4129 shutting down the interfaces. */
4131 /* Tell the timer to stop, then wait for it to stop. This avoids
4132 problems with race conditions removing the timer here. */
4133 atomic_inc(&stop_operation);
4134 del_timer_sync(&ipmi_timer);
4136 #ifdef CONFIG_PROC_FS
4137 remove_proc_entry(proc_ipmi_root->name, &proc_root);
4138 #endif /* CONFIG_PROC_FS */
4140 driver_unregister(&ipmidriver);
4142 initialized = 0;
4144 /* Check for buffer leaks. */
4145 count = atomic_read(&smi_msg_inuse_count);
4146 if (count != 0)
4147 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4148 count);
4149 count = atomic_read(&recv_msg_inuse_count);
4150 if (count != 0)
4151 printk(KERN_WARNING PFX "recv message count %d at exit\n",
4152 count);
4154 module_exit(cleanup_ipmi);
4156 module_init(ipmi_init_msghandler_mod);
4157 MODULE_LICENSE("GPL");
4158 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4159 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4160 MODULE_VERSION(IPMI_DRIVER_VERSION);
4162 EXPORT_SYMBOL(ipmi_create_user);
4163 EXPORT_SYMBOL(ipmi_destroy_user);
4164 EXPORT_SYMBOL(ipmi_get_version);
4165 EXPORT_SYMBOL(ipmi_request_settime);
4166 EXPORT_SYMBOL(ipmi_request_supply_msgs);
4167 EXPORT_SYMBOL(ipmi_register_smi);
4168 EXPORT_SYMBOL(ipmi_unregister_smi);
4169 EXPORT_SYMBOL(ipmi_register_for_cmd);
4170 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4171 EXPORT_SYMBOL(ipmi_smi_msg_received);
4172 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4173 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4174 EXPORT_SYMBOL(ipmi_addr_length);
4175 EXPORT_SYMBOL(ipmi_validate_addr);
4176 EXPORT_SYMBOL(ipmi_set_gets_events);
4177 EXPORT_SYMBOL(ipmi_smi_watcher_register);
4178 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4179 EXPORT_SYMBOL(ipmi_set_my_address);
4180 EXPORT_SYMBOL(ipmi_get_my_address);
4181 EXPORT_SYMBOL(ipmi_set_my_LUN);
4182 EXPORT_SYMBOL(ipmi_get_my_LUN);
4183 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4184 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
4185 EXPORT_SYMBOL(ipmi_free_recv_msg);