drivers/char/ipmi/ipmi_msghandler.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * ipmi_msghandler.c
5 * Incoming and outgoing message routing for an IPMI interface.
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
9 * source@mvista.com
11 * Copyright 2002 MontaVista Software Inc.
14 #define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
15 #define dev_fmt pr_fmt
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/poll.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/spinlock.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/ipmi.h>
26 #include <linux/ipmi_smi.h>
27 #include <linux/notifier.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/rcupdate.h>
31 #include <linux/interrupt.h>
32 #include <linux/moduleparam.h>
33 #include <linux/workqueue.h>
34 #include <linux/uuid.h>
35 #include <linux/nospec.h>
36 #include <linux/vmalloc.h>
38 #define IPMI_DRIVER_VERSION "39.2"
40 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
41 static int ipmi_init_msghandler(void);
42 static void smi_recv_tasklet(unsigned long);
43 static void handle_new_recv_msgs(struct ipmi_smi *intf);
44 static void need_waiter(struct ipmi_smi *intf);
45 static int handle_one_recv_msg(struct ipmi_smi *intf,
46 struct ipmi_smi_msg *msg);
48 static bool initialized;
49 static bool drvregistered;
51 enum ipmi_panic_event_op {
52 IPMI_SEND_PANIC_EVENT_NONE,
53 IPMI_SEND_PANIC_EVENT,
54 IPMI_SEND_PANIC_EVENT_STRING
56 #ifdef CONFIG_IPMI_PANIC_STRING
57 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
58 #elif defined(CONFIG_IPMI_PANIC_EVENT)
59 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
60 #else
61 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
62 #endif
63 static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
65 static int panic_op_write_handler(const char *val,
66 const struct kernel_param *kp)
68 char valcp[16];
69 char *s;
71 strncpy(valcp, val, 15);
72 valcp[15] = '\0';
74 s = strstrip(valcp);
76 if (strcmp(s, "none") == 0)
77 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
78 else if (strcmp(s, "event") == 0)
79 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
80 else if (strcmp(s, "string") == 0)
81 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
82 else
83 return -EINVAL;
85 return 0;
88 static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
90 switch (ipmi_send_panic_event) {
91 case IPMI_SEND_PANIC_EVENT_NONE:
92 strcpy(buffer, "none");
93 break;
95 case IPMI_SEND_PANIC_EVENT:
96 strcpy(buffer, "event");
97 break;
99 case IPMI_SEND_PANIC_EVENT_STRING:
100 strcpy(buffer, "string");
101 break;
103 default:
104 strcpy(buffer, "???");
105 break;
108 return strlen(buffer);
111 static const struct kernel_param_ops panic_op_ops = {
112 .set = panic_op_write_handler,
113 .get = panic_op_read_handler
115 module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
116 MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
119 #define MAX_EVENTS_IN_QUEUE 25
121 /* Remain in auto-maintenance mode for this amount of time (in ms). */
122 static unsigned long maintenance_mode_timeout_ms = 30000;
123 module_param(maintenance_mode_timeout_ms, ulong, 0644);
124 MODULE_PARM_DESC(maintenance_mode_timeout_ms,
125 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
128 * Don't let a message sit in a queue forever, always time it with at least
129 * the max message timer. This is in milliseconds.
131 #define MAX_MSG_TIMEOUT 60000
134 * Timeout times below are in milliseconds, and are done off a 1
135 * second timer. So setting the value to 1000 would mean anything
136 * between 0 and 1000ms. So really the only reasonable minimum
137 * setting is 2000ms, which is between 1 and 2 seconds.
140 /* The default timeout for message retries. */
141 static unsigned long default_retry_ms = 2000;
142 module_param(default_retry_ms, ulong, 0644);
143 MODULE_PARM_DESC(default_retry_ms,
144 "The time (milliseconds) between retry sends");
146 /* The default timeout for maintenance mode message retries. */
147 static unsigned long default_maintenance_retry_ms = 3000;
148 module_param(default_maintenance_retry_ms, ulong, 0644);
149 MODULE_PARM_DESC(default_maintenance_retry_ms,
150 "The time (milliseconds) between retry sends in maintenance mode");
152 /* The default maximum number of retries */
153 static unsigned int default_max_retries = 4;
154 module_param(default_max_retries, uint, 0644);
155 MODULE_PARM_DESC(default_max_retries,
156 "The time (milliseconds) between retry sends in maintenance mode");
158 /* Call every ~1000 ms. */
159 #define IPMI_TIMEOUT_TIME 1000
161 /* How many jiffies does it take to get to the timeout time. */
162 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
165 * Request events from the queue every second (this is the number of
166 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
167 * future, IPMI will add a way to know immediately if an event is in
168 * the queue and this silliness can go away.
170 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
172 /* How long should we cache dynamic device IDs? */
173 #define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)
176 * The main "user" data structure.
178 struct ipmi_user {
179 struct list_head link;
182 * Set to NULL when the user is destroyed, a pointer to myself
183 * so srcu_dereference can be used on it.
185 struct ipmi_user *self;
186 struct srcu_struct release_barrier;
188 struct kref refcount;
190 /* The upper layer that handles receive messages. */
191 const struct ipmi_user_hndl *handler;
192 void *handler_data;
194 /* The interface this user is bound to. */
195 struct ipmi_smi *intf;
197 /* Does this interface receive IPMI events? */
198 bool gets_events;
200 /* Free must run in process context for RCU cleanup. */
201 struct work_struct remove_work;
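/*
 * Grab an SRCU-protected reference to a user.  Returns NULL (after
 * dropping the read lock) if the user is being destroyed; otherwise
 * the caller must pair this with release_ipmi_user() using the same
 * index.
 */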
204 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
205 __acquires(user->release_barrier)
207 struct ipmi_user *ruser;
209 *index = srcu_read_lock(&user->release_barrier);
210 ruser = srcu_dereference(user->self, &user->release_barrier);
211 if (!ruser)
212 srcu_read_unlock(&user->release_barrier, *index);
213 return ruser;
216 static void release_ipmi_user(struct ipmi_user *user, int index)
218 srcu_read_unlock(&user->release_barrier, index);
221 struct cmd_rcvr {
222 struct list_head link;
224 struct ipmi_user *user;
225 unsigned char netfn;
226 unsigned char cmd;
227 unsigned int chans;
230 * This is used to form a linked list during mass deletion.
231 * Since this is in an RCU list, we cannot use the link above
232 * or change any data until the RCU period completes. So we
233 * use this next variable during mass deletion so we can have
234 * a list and don't have to wait and restart the search on
235 * every individual deletion of a command.
237 struct cmd_rcvr *next;
240 struct seq_table {
241 unsigned int inuse : 1;
242 unsigned int broadcast : 1;
244 unsigned long timeout;
245 unsigned long orig_timeout;
246 unsigned int retries_left;
249 * To verify on an incoming send message response that this is
250 * the message that the response is for, we keep a sequence id
251 * and increment it every time we send a message.
253 long seqid;
256 * This is held so we can properly respond to the message on a
257 * timeout, and it is used to hold the temporary data for
258 * retransmission, too.
260 struct ipmi_recv_msg *recv_msg;
264 * Store the information in a msgid (long) to allow us to find a
265 * sequence table entry from the msgid.
267 #define STORE_SEQ_IN_MSGID(seq, seqid) \
268 ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
270 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
271 do { \
272 seq = (((msgid) >> 26) & 0x3f); \
273 seqid = ((msgid) & 0x3ffffff); \
274 } while (0)
276 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
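/*
 * For example (a worked instance of the packing above): storing seq 5
 * with seqid 0x123456 gives (5 << 26) | 0x123456 = 0x14123456, and
 * GET_SEQ_FROM_MSGID(0x14123456, ...) recovers seq = 5 and
 * seqid = 0x123456.
 */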
278 #define IPMI_MAX_CHANNELS 16
279 struct ipmi_channel {
280 unsigned char medium;
281 unsigned char protocol;
284 struct ipmi_channel_set {
285 struct ipmi_channel c[IPMI_MAX_CHANNELS];
288 struct ipmi_my_addrinfo {
290 * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
291 * but may be changed by the user.
293 unsigned char address;
296 * My LUN. This should generally stay the SMS LUN, but just in
297 * case...
299 unsigned char lun;
303 * Note that the product id, manufacturer id, guid, and device id are
304 * immutable in this structure, so dyn_mutex is not required for
305 * accessing those. If those change on a BMC, a new BMC is allocated.
307 struct bmc_device {
308 struct platform_device pdev;
309 struct list_head intfs; /* Interfaces on this BMC. */
310 struct ipmi_device_id id;
311 struct ipmi_device_id fetch_id;
312 int dyn_id_set;
313 unsigned long dyn_id_expiry;
314 struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
315 guid_t guid;
316 guid_t fetch_guid;
317 int dyn_guid_set;
318 struct kref usecount;
319 struct work_struct remove_work;
321 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
323 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
324 struct ipmi_device_id *id,
325 bool *guid_set, guid_t *guid);
328 * Various statistics for IPMI, these index stats[] in the ipmi_smi
329 * structure.
331 enum ipmi_stat_indexes {
332 /* Commands we got from the user that were invalid. */
333 IPMI_STAT_sent_invalid_commands = 0,
335 /* Commands we sent to the MC. */
336 IPMI_STAT_sent_local_commands,
338 /* Responses from the MC that were delivered to a user. */
339 IPMI_STAT_handled_local_responses,
341 /* Responses from the MC that were not delivered to a user. */
342 IPMI_STAT_unhandled_local_responses,
344 /* Commands we sent out to the IPMB bus. */
345 IPMI_STAT_sent_ipmb_commands,
347 /* Commands sent on the IPMB that had errors on the SEND CMD */
348 IPMI_STAT_sent_ipmb_command_errs,
350 /* Each retransmit increments this count. */
351 IPMI_STAT_retransmitted_ipmb_commands,
354 * When a message times out (runs out of retransmits) this is
355 * incremented.
357 IPMI_STAT_timed_out_ipmb_commands,
360 * This is like above, but for broadcasts. Broadcasts are
361 * *not* included in the above count (they are expected to
362 * time out).
364 IPMI_STAT_timed_out_ipmb_broadcasts,
366 /* Responses I have sent to the IPMB bus. */
367 IPMI_STAT_sent_ipmb_responses,
369 /* The response was delivered to the user. */
370 IPMI_STAT_handled_ipmb_responses,
372 /* The response had invalid data in it. */
373 IPMI_STAT_invalid_ipmb_responses,
375 /* The response didn't have anyone waiting for it. */
376 IPMI_STAT_unhandled_ipmb_responses,
378 /* Commands we sent out over the LAN. */
379 IPMI_STAT_sent_lan_commands,
381 /* Commands sent over the LAN that had errors on the SEND CMD */
382 IPMI_STAT_sent_lan_command_errs,
384 /* Each retransmit increments this count. */
385 IPMI_STAT_retransmitted_lan_commands,
388 * When a message times out (runs out of retransmits) this is
389 * incremented.
391 IPMI_STAT_timed_out_lan_commands,
393 /* Responses I have sent over the LAN. */
394 IPMI_STAT_sent_lan_responses,
396 /* The response was delivered to the user. */
397 IPMI_STAT_handled_lan_responses,
399 /* The response had invalid data in it. */
400 IPMI_STAT_invalid_lan_responses,
402 /* The response didn't have anyone waiting for it. */
403 IPMI_STAT_unhandled_lan_responses,
405 /* The command was delivered to the user. */
406 IPMI_STAT_handled_commands,
408 /* The command had invalid data in it. */
409 IPMI_STAT_invalid_commands,
411 /* The command didn't have anyone waiting for it. */
412 IPMI_STAT_unhandled_commands,
414 /* Invalid data in an event. */
415 IPMI_STAT_invalid_events,
417 /* Events that were received with the proper format. */
418 IPMI_STAT_events,
420 /* Retransmissions on IPMB that failed. */
421 IPMI_STAT_dropped_rexmit_ipmb_commands,
423 /* Retransmissions on LAN that failed. */
424 IPMI_STAT_dropped_rexmit_lan_commands,
426 /* This *must* remain last, add new values above this. */
427 IPMI_NUM_STATS
431 #define IPMI_IPMB_NUM_SEQ 64
432 struct ipmi_smi {
433 struct module *owner;
435 /* What interface number are we? */
436 int intf_num;
438 struct kref refcount;
440 /* Set when the interface is being unregistered. */
441 bool in_shutdown;
443 /* Used for a list of interfaces. */
444 struct list_head link;
447 * The list of upper layers that are using me. seq_lock write
448 * protects this. Read protection is with srcu.
450 struct list_head users;
451 struct srcu_struct users_srcu;
453 /* Used for wake ups at startup. */
454 wait_queue_head_t waitq;
457 * Prevents the interface from being unregistered while it is in
458 * use via a lookup through the BMC structure.
461 struct mutex bmc_reg_mutex;
463 struct bmc_device tmp_bmc;
464 struct bmc_device *bmc;
465 bool bmc_registered;
466 struct list_head bmc_link;
467 char *my_dev_name;
468 bool in_bmc_register; /* Handle recursive situations. Yuck. */
469 struct work_struct bmc_reg_work;
471 const struct ipmi_smi_handlers *handlers;
472 void *send_info;
474 /* Driver-model device for the system interface. */
475 struct device *si_dev;
478 * A table of sequence numbers for this interface. We use the
479 * sequence numbers for IPMB messages that go out of the
480 * interface to match them up with their responses. A routine
481 * is called periodically to time the items in this list.
483 spinlock_t seq_lock;
484 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
485 int curr_seq;
488 * Messages queued for delivery. If delivery fails (out of memory
489 for instance), they will stay in here to be processed later in a
490 * periodic timer interrupt. The tasklet is for handling received
491 * messages directly from the handler.
493 spinlock_t waiting_rcv_msgs_lock;
494 struct list_head waiting_rcv_msgs;
495 atomic_t watchdog_pretimeouts_to_deliver;
496 struct tasklet_struct recv_tasklet;
498 spinlock_t xmit_msgs_lock;
499 struct list_head xmit_msgs;
500 struct ipmi_smi_msg *curr_msg;
501 struct list_head hp_xmit_msgs;
504 * The list of command receivers that are registered for commands
505 * on this interface.
507 struct mutex cmd_rcvrs_mutex;
508 struct list_head cmd_rcvrs;
511 * Events that were queued because no one was there to receive
512 * them.
514 spinlock_t events_lock; /* For dealing with event stuff. */
515 struct list_head waiting_events;
516 unsigned int waiting_events_count; /* How many events in queue? */
517 char delivering_events;
518 char event_msg_printed;
520 /* How many users are waiting for events? */
521 atomic_t event_waiters;
522 unsigned int ticks_to_req_ev;
524 spinlock_t watch_lock; /* For dealing with watch stuff below. */
526 /* How many users are waiting for commands? */
527 unsigned int command_waiters;
529 /* How many users are waiting for watchdogs? */
530 unsigned int watchdog_waiters;
532 /* How many users are waiting for message responses? */
533 unsigned int response_waiters;
536 * Tells what the lower layer has last been asked to watch for,
537 * messages and/or watchdogs. Protected by watch_lock.
539 unsigned int last_watch_mask;
542 * The event receiver for my BMC, only really used at panic
543 * shutdown as a place to store this.
545 unsigned char event_receiver;
546 unsigned char event_receiver_lun;
547 unsigned char local_sel_device;
548 unsigned char local_event_generator;
550 /* For handling of maintenance mode. */
551 int maintenance_mode;
552 bool maintenance_mode_enable;
553 int auto_maintenance_timeout;
554 spinlock_t maintenance_mode_lock; /* Used in a timer... */
557 * If we are doing maintenance on something on IPMB, extend
558 * the timeout time to avoid timeouts writing firmware and
559 * such.
561 int ipmb_maintenance_mode_timeout;
564 * A cheap hack, if this is non-null and a message to an
565 * interface comes in with a NULL user, call this routine with
566 * it. Note that the message will still be freed by the
567 * caller. This only works on the system interface.
569 * Protected by bmc_reg_mutex.
571 void (*null_user_handler)(struct ipmi_smi *intf,
572 struct ipmi_recv_msg *msg);
575 * When we are scanning the channels for an SMI, this will
576 * tell which channel we are scanning.
578 int curr_channel;
580 /* Channel information */
581 struct ipmi_channel_set *channel_list;
582 unsigned int curr_working_cset; /* First index into the following. */
583 struct ipmi_channel_set wchannels[2];
584 struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
585 bool channels_ready;
587 atomic_t stats[IPMI_NUM_STATS];
590 * run_to_completion is duplicated from the smb_info, smi_info
591 * and ipmi_serial_info structures. Used to decrease the number of
592 * parameters passed by "low" level IPMI code.
594 int run_to_completion;
596 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
598 static void __get_guid(struct ipmi_smi *intf);
599 static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
600 static int __ipmi_bmc_register(struct ipmi_smi *intf,
601 struct ipmi_device_id *id,
602 bool guid_set, guid_t *guid, int intf_num);
603 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
607 * The driver model view of the IPMI messaging driver.
609 static struct platform_driver ipmidriver = {
610 .driver = {
611 .name = "ipmi",
612 .bus = &platform_bus_type
616 * This mutex keeps us from adding the same BMC twice.
618 static DEFINE_MUTEX(ipmidriver_mutex);
620 static LIST_HEAD(ipmi_interfaces);
621 static DEFINE_MUTEX(ipmi_interfaces_mutex);
622 #define ipmi_interfaces_mutex_held() \
623 lockdep_is_held(&ipmi_interfaces_mutex)
624 static struct srcu_struct ipmi_interfaces_srcu;
627 * List of watchers that want to know when smi's are added and deleted.
629 static LIST_HEAD(smi_watchers);
630 static DEFINE_MUTEX(smi_watchers_mutex);
632 #define ipmi_inc_stat(intf, stat) \
633 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
634 #define ipmi_get_stat(intf, stat) \
635 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
637 static const char * const addr_src_to_str[] = {
638 "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
639 "device-tree", "platform"
642 const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
644 if (src >= SI_LAST)
645 src = 0; /* Invalid */
646 return addr_src_to_str[src];
648 EXPORT_SYMBOL(ipmi_addr_src_to_str);
650 static int is_lan_addr(struct ipmi_addr *addr)
652 return addr->addr_type == IPMI_LAN_ADDR_TYPE;
655 static int is_ipmb_addr(struct ipmi_addr *addr)
657 return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
660 static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
662 return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
665 static void free_recv_msg_list(struct list_head *q)
667 struct ipmi_recv_msg *msg, *msg2;
669 list_for_each_entry_safe(msg, msg2, q, link) {
670 list_del(&msg->link);
671 ipmi_free_recv_msg(msg);
675 static void free_smi_msg_list(struct list_head *q)
677 struct ipmi_smi_msg *msg, *msg2;
679 list_for_each_entry_safe(msg, msg2, q, link) {
680 list_del(&msg->link);
681 ipmi_free_smi_msg(msg);
685 static void clean_up_interface_data(struct ipmi_smi *intf)
687 int i;
688 struct cmd_rcvr *rcvr, *rcvr2;
689 struct list_head list;
691 tasklet_kill(&intf->recv_tasklet);
693 free_smi_msg_list(&intf->waiting_rcv_msgs);
694 free_recv_msg_list(&intf->waiting_events);
697 * Wholesale remove all the entries from the list in the
698 * interface and wait for RCU to know that none are in use.
700 mutex_lock(&intf->cmd_rcvrs_mutex);
701 INIT_LIST_HEAD(&list);
702 list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
703 mutex_unlock(&intf->cmd_rcvrs_mutex);
705 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
706 kfree(rcvr);
708 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
709 if ((intf->seq_table[i].inuse)
710 && (intf->seq_table[i].recv_msg))
711 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
715 static void intf_free(struct kref *ref)
717 struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
719 clean_up_interface_data(intf);
720 kfree(intf);
723 struct watcher_entry {
724 int intf_num;
725 struct ipmi_smi *intf;
726 struct list_head link;
729 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
731 struct ipmi_smi *intf;
732 int index, rv;
735 * Make sure the driver is actually initialized; this handles
736 * problems with initialization order.
738 rv = ipmi_init_msghandler();
739 if (rv)
740 return rv;
742 mutex_lock(&smi_watchers_mutex);
744 list_add(&watcher->link, &smi_watchers);
746 index = srcu_read_lock(&ipmi_interfaces_srcu);
747 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
748 int intf_num = READ_ONCE(intf->intf_num);
750 if (intf_num == -1)
751 continue;
752 watcher->new_smi(intf_num, intf->si_dev);
754 srcu_read_unlock(&ipmi_interfaces_srcu, index);
756 mutex_unlock(&smi_watchers_mutex);
758 return 0;
760 EXPORT_SYMBOL(ipmi_smi_watcher_register);
762 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
764 mutex_lock(&smi_watchers_mutex);
765 list_del(&watcher->link);
766 mutex_unlock(&smi_watchers_mutex);
767 return 0;
769 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
772 * Must be called with smi_watchers_mutex held.
774 static void
775 call_smi_watchers(int i, struct device *dev)
777 struct ipmi_smi_watcher *w;
779 mutex_lock(&smi_watchers_mutex);
780 list_for_each_entry(w, &smi_watchers, link) {
781 if (try_module_get(w->owner)) {
782 w->new_smi(i, dev);
783 module_put(w->owner);
786 mutex_unlock(&smi_watchers_mutex);
789 static int
790 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
792 if (addr1->addr_type != addr2->addr_type)
793 return 0;
795 if (addr1->channel != addr2->channel)
796 return 0;
798 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
799 struct ipmi_system_interface_addr *smi_addr1
800 = (struct ipmi_system_interface_addr *) addr1;
801 struct ipmi_system_interface_addr *smi_addr2
802 = (struct ipmi_system_interface_addr *) addr2;
803 return (smi_addr1->lun == smi_addr2->lun);
806 if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
807 struct ipmi_ipmb_addr *ipmb_addr1
808 = (struct ipmi_ipmb_addr *) addr1;
809 struct ipmi_ipmb_addr *ipmb_addr2
810 = (struct ipmi_ipmb_addr *) addr2;
812 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
813 && (ipmb_addr1->lun == ipmb_addr2->lun));
816 if (is_lan_addr(addr1)) {
817 struct ipmi_lan_addr *lan_addr1
818 = (struct ipmi_lan_addr *) addr1;
819 struct ipmi_lan_addr *lan_addr2
820 = (struct ipmi_lan_addr *) addr2;
822 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
823 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
824 && (lan_addr1->session_handle
825 == lan_addr2->session_handle)
826 && (lan_addr1->lun == lan_addr2->lun));
829 return 1;
832 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
834 if (len < sizeof(struct ipmi_system_interface_addr))
835 return -EINVAL;
837 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
838 if (addr->channel != IPMI_BMC_CHANNEL)
839 return -EINVAL;
840 return 0;
843 if ((addr->channel == IPMI_BMC_CHANNEL)
844 || (addr->channel >= IPMI_MAX_CHANNELS)
845 || (addr->channel < 0))
846 return -EINVAL;
848 if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
849 if (len < sizeof(struct ipmi_ipmb_addr))
850 return -EINVAL;
851 return 0;
854 if (is_lan_addr(addr)) {
855 if (len < sizeof(struct ipmi_lan_addr))
856 return -EINVAL;
857 return 0;
860 return -EINVAL;
862 EXPORT_SYMBOL(ipmi_validate_addr);
864 unsigned int ipmi_addr_length(int addr_type)
866 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
867 return sizeof(struct ipmi_system_interface_addr);
869 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
870 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
871 return sizeof(struct ipmi_ipmb_addr);
873 if (addr_type == IPMI_LAN_ADDR_TYPE)
874 return sizeof(struct ipmi_lan_addr);
876 return 0;
878 EXPORT_SYMBOL(ipmi_addr_length);
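/*
 * Deliver a response to its user.  Messages with no user go to the
 * interface's null_user_handler if one is set (used on the system
 * interface); in panic context the message is simply dropped.  In all
 * cases the message is consumed here or by the user's receive handler.
 */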
880 static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
882 int rv = 0;
884 if (!msg->user) {
885 /* Special handling for NULL users. */
886 if (intf->null_user_handler) {
887 intf->null_user_handler(intf, msg);
888 } else {
889 /* No handler, so give up. */
890 rv = -EINVAL;
892 ipmi_free_recv_msg(msg);
893 } else if (oops_in_progress) {
895 * If we are running in the panic context, calling the
896 * receive handler doesn't have much meaning and has a deadlock
897 * risk, so simply skip it in that case.
899 ipmi_free_recv_msg(msg);
900 } else {
901 int index;
902 struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
904 if (user) {
905 user->handler->ipmi_recv_hndl(msg, user->handler_data);
906 release_ipmi_user(user, index);
907 } else {
908 /* User went away, give up. */
909 ipmi_free_recv_msg(msg);
910 rv = -EINVAL;
914 return rv;
917 static void deliver_local_response(struct ipmi_smi *intf,
918 struct ipmi_recv_msg *msg)
920 if (deliver_response(intf, msg))
921 ipmi_inc_stat(intf, unhandled_local_responses);
922 else
923 ipmi_inc_stat(intf, handled_local_responses);
926 static void deliver_err_response(struct ipmi_smi *intf,
927 struct ipmi_recv_msg *msg, int err)
929 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
930 msg->msg_data[0] = err;
931 msg->msg.netfn |= 1; /* Convert to a response. */
932 msg->msg.data_len = 1;
933 msg->msg.data = msg->msg_data;
934 deliver_local_response(intf, msg);
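/*
 * The watch flags are reference counted per class (messages, watchdog,
 * commands).  smi_add_watch()/smi_remove_watch() adjust the counters
 * and only call the lower layer's set_need_watch() when the combined
 * mask actually changes.
 */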
937 static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
939 unsigned long iflags;
941 if (!intf->handlers->set_need_watch)
942 return;
944 spin_lock_irqsave(&intf->watch_lock, iflags);
945 if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
946 intf->response_waiters++;
948 if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
949 intf->watchdog_waiters++;
951 if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
952 intf->command_waiters++;
954 if ((intf->last_watch_mask & flags) != flags) {
955 intf->last_watch_mask |= flags;
956 intf->handlers->set_need_watch(intf->send_info,
957 intf->last_watch_mask);
959 spin_unlock_irqrestore(&intf->watch_lock, iflags);
962 static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
964 unsigned long iflags;
966 if (!intf->handlers->set_need_watch)
967 return;
969 spin_lock_irqsave(&intf->watch_lock, iflags);
970 if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
971 intf->response_waiters--;
973 if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
974 intf->watchdog_waiters--;
976 if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
977 intf->command_waiters--;
979 flags = 0;
980 if (intf->response_waiters)
981 flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
982 if (intf->watchdog_waiters)
983 flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
984 if (intf->command_waiters)
985 flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
987 if (intf->last_watch_mask != flags) {
988 intf->last_watch_mask = flags;
989 intf->handlers->set_need_watch(intf->send_info,
990 intf->last_watch_mask);
992 spin_unlock_irqrestore(&intf->watch_lock, iflags);
996 * Find the next sequence number not being used and add the given
997 * message with the given timeout to the sequence table. This must be
998 * called with the interface's seq_lock held.
1000 static int intf_next_seq(struct ipmi_smi *intf,
1001 struct ipmi_recv_msg *recv_msg,
1002 unsigned long timeout,
1003 int retries,
1004 int broadcast,
1005 unsigned char *seq,
1006 long *seqid)
1008 int rv = 0;
1009 unsigned int i;
1011 if (timeout == 0)
1012 timeout = default_retry_ms;
1013 if (retries < 0)
1014 retries = default_max_retries;
1016 for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
1017 i = (i+1)%IPMI_IPMB_NUM_SEQ) {
1018 if (!intf->seq_table[i].inuse)
1019 break;
1022 if (!intf->seq_table[i].inuse) {
1023 intf->seq_table[i].recv_msg = recv_msg;
1026 * Start with the maximum timeout, when the send response
1027 * comes in we will start the real timer.
1029 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
1030 intf->seq_table[i].orig_timeout = timeout;
1031 intf->seq_table[i].retries_left = retries;
1032 intf->seq_table[i].broadcast = broadcast;
1033 intf->seq_table[i].inuse = 1;
1034 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
1035 *seq = i;
1036 *seqid = intf->seq_table[i].seqid;
1037 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1038 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1039 need_waiter(intf);
1040 } else {
1041 rv = -EAGAIN;
1044 return rv;
1048 * Return the receive message for the given sequence number and
1049 * release the sequence number so it can be reused. Some other data
1050 * is passed in to be sure the message matches up correctly (to help
1051 * guard against messages coming in after their timeout and the
1052 * sequence number being reused).
1054 static int intf_find_seq(struct ipmi_smi *intf,
1055 unsigned char seq,
1056 short channel,
1057 unsigned char cmd,
1058 unsigned char netfn,
1059 struct ipmi_addr *addr,
1060 struct ipmi_recv_msg **recv_msg)
1062 int rv = -ENODEV;
1063 unsigned long flags;
1065 if (seq >= IPMI_IPMB_NUM_SEQ)
1066 return -EINVAL;
1068 spin_lock_irqsave(&intf->seq_lock, flags);
1069 if (intf->seq_table[seq].inuse) {
1070 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1072 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
1073 && (msg->msg.netfn == netfn)
1074 && (ipmi_addr_equal(addr, &msg->addr))) {
1075 *recv_msg = msg;
1076 intf->seq_table[seq].inuse = 0;
1077 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1078 rv = 0;
1081 spin_unlock_irqrestore(&intf->seq_lock, flags);
1083 return rv;
1087 /* Start the timer for a specific sequence table entry. */
1088 static int intf_start_seq_timer(struct ipmi_smi *intf,
1089 long msgid)
1091 int rv = -ENODEV;
1092 unsigned long flags;
1093 unsigned char seq;
1094 unsigned long seqid;
1097 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1099 spin_lock_irqsave(&intf->seq_lock, flags);
1101 * We do this verification because the user can be deleted
1102 * while a message is outstanding.
1104 if ((intf->seq_table[seq].inuse)
1105 && (intf->seq_table[seq].seqid == seqid)) {
1106 struct seq_table *ent = &intf->seq_table[seq];
1107 ent->timeout = ent->orig_timeout;
1108 rv = 0;
1110 spin_unlock_irqrestore(&intf->seq_lock, flags);
1112 return rv;
1115 /* Got an error for the send message for a specific sequence number. */
1116 static int intf_err_seq(struct ipmi_smi *intf,
1117 long msgid,
1118 unsigned int err)
1120 int rv = -ENODEV;
1121 unsigned long flags;
1122 unsigned char seq;
1123 unsigned long seqid;
1124 struct ipmi_recv_msg *msg = NULL;
1127 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1129 spin_lock_irqsave(&intf->seq_lock, flags);
1131 * We do this verification because the user can be deleted
1132 * while a message is outstanding.
1134 if ((intf->seq_table[seq].inuse)
1135 && (intf->seq_table[seq].seqid == seqid)) {
1136 struct seq_table *ent = &intf->seq_table[seq];
1138 ent->inuse = 0;
1139 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1140 msg = ent->recv_msg;
1141 rv = 0;
1143 spin_unlock_irqrestore(&intf->seq_lock, flags);
1145 if (msg)
1146 deliver_err_response(intf, msg, err);
1148 return rv;
1151 static void free_user_work(struct work_struct *work)
1153 struct ipmi_user *user = container_of(work, struct ipmi_user,
1154 remove_work);
1156 cleanup_srcu_struct(&user->release_barrier);
1157 vfree(user);
1160 int ipmi_create_user(unsigned int if_num,
1161 const struct ipmi_user_hndl *handler,
1162 void *handler_data,
1163 struct ipmi_user **user)
1165 unsigned long flags;
1166 struct ipmi_user *new_user;
1167 int rv, index;
1168 struct ipmi_smi *intf;
1171 * There is no module usecount here, because it's not
1172 * required. Since this can only be used by and called from
1173 * other modules, they will implicitly use this module, and
1174 * thus this can't be removed unless the other modules are
1175 * removed.
1178 if (handler == NULL)
1179 return -EINVAL;
1182 * Make sure the driver is actually initialized; this handles
1183 * problems with initialization order.
1185 rv = ipmi_init_msghandler();
1186 if (rv)
1187 return rv;
1189 new_user = vzalloc(sizeof(*new_user));
1190 if (!new_user)
1191 return -ENOMEM;
1193 index = srcu_read_lock(&ipmi_interfaces_srcu);
1194 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1195 if (intf->intf_num == if_num)
1196 goto found;
1198 /* Not found, return an error */
1199 rv = -EINVAL;
1200 goto out_kfree;
1202 found:
1203 INIT_WORK(&new_user->remove_work, free_user_work);
1205 rv = init_srcu_struct(&new_user->release_barrier);
1206 if (rv)
1207 goto out_kfree;
1209 if (!try_module_get(intf->owner)) {
1210 rv = -ENODEV;
1211 goto out_kfree;
1214 /* Note that each existing user holds a refcount to the interface. */
1215 kref_get(&intf->refcount);
1217 kref_init(&new_user->refcount);
1218 new_user->handler = handler;
1219 new_user->handler_data = handler_data;
1220 new_user->intf = intf;
1221 new_user->gets_events = false;
1223 rcu_assign_pointer(new_user->self, new_user);
1224 spin_lock_irqsave(&intf->seq_lock, flags);
1225 list_add_rcu(&new_user->link, &intf->users);
1226 spin_unlock_irqrestore(&intf->seq_lock, flags);
1227 if (handler->ipmi_watchdog_pretimeout)
1228 /* User wants pretimeouts, so make sure to watch for them. */
1229 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1230 srcu_read_unlock(&ipmi_interfaces_srcu, index);
1231 *user = new_user;
1232 return 0;
1234 out_kfree:
1235 srcu_read_unlock(&ipmi_interfaces_srcu, index);
1236 vfree(new_user);
1237 return rv;
1239 EXPORT_SYMBOL(ipmi_create_user);
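/*
 * A minimal usage sketch (hypothetical caller, names are illustrative):
 * a client supplies an ipmi_user_hndl with at least ipmi_recv_hndl set,
 * creates a user on an interface, and destroys it when done.  The
 * receive handler owns each delivered message and must free it.
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
 *	{
 *		ipmi_free_recv_msg(msg);
 *	}
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	if (!rv)
 *		ipmi_destroy_user(user);
 */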
1241 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1243 int rv, index;
1244 struct ipmi_smi *intf;
1246 index = srcu_read_lock(&ipmi_interfaces_srcu);
1247 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1248 if (intf->intf_num == if_num)
1249 goto found;
1251 srcu_read_unlock(&ipmi_interfaces_srcu, index);
1253 /* Not found, return an error */
1254 return -EINVAL;
1256 found:
1257 if (!intf->handlers->get_smi_info)
1258 rv = -ENOTTY;
1259 else
1260 rv = intf->handlers->get_smi_info(intf->send_info, data);
1261 srcu_read_unlock(&ipmi_interfaces_srcu, index);
1263 return rv;
1265 EXPORT_SYMBOL(ipmi_get_smi_info);
1267 static void free_user(struct kref *ref)
1269 struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1271 /* SRCU cleanup must happen in task context. */
1272 schedule_work(&user->remove_work);
1275 static void _ipmi_destroy_user(struct ipmi_user *user)
1277 struct ipmi_smi *intf = user->intf;
1278 int i;
1279 unsigned long flags;
1280 struct cmd_rcvr *rcvr;
1281 struct cmd_rcvr *rcvrs = NULL;
1283 if (!acquire_ipmi_user(user, &i)) {
1285 * The user has already been cleaned up, just make sure
1286 * nothing is using it and return.
1288 synchronize_srcu(&user->release_barrier);
1289 return;
1292 rcu_assign_pointer(user->self, NULL);
1293 release_ipmi_user(user, i);
1295 synchronize_srcu(&user->release_barrier);
1297 if (user->handler->shutdown)
1298 user->handler->shutdown(user->handler_data);
1300 if (user->handler->ipmi_watchdog_pretimeout)
1301 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1303 if (user->gets_events)
1304 atomic_dec(&intf->event_waiters);
1306 /* Remove the user from the interface's sequence table. */
1307 spin_lock_irqsave(&intf->seq_lock, flags);
1308 list_del_rcu(&user->link);
1310 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1311 if (intf->seq_table[i].inuse
1312 && (intf->seq_table[i].recv_msg->user == user)) {
1313 intf->seq_table[i].inuse = 0;
1314 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1315 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1318 spin_unlock_irqrestore(&intf->seq_lock, flags);
1321 * Remove the user from the command receiver's table. First
1322 * we build a list of everything (not using the standard link,
1323 * since other things may be using it till we do
1324 * synchronize_srcu()) then free everything in that list.
1326 mutex_lock(&intf->cmd_rcvrs_mutex);
1327 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1328 lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1329 if (rcvr->user == user) {
1330 list_del_rcu(&rcvr->link);
1331 rcvr->next = rcvrs;
1332 rcvrs = rcvr;
1335 mutex_unlock(&intf->cmd_rcvrs_mutex);
1336 synchronize_rcu();
1337 while (rcvrs) {
1338 rcvr = rcvrs;
1339 rcvrs = rcvr->next;
1340 kfree(rcvr);
1343 kref_put(&intf->refcount, intf_free);
1344 module_put(intf->owner);
1347 int ipmi_destroy_user(struct ipmi_user *user)
1349 _ipmi_destroy_user(user);
1351 kref_put(&user->refcount, free_user);
1353 return 0;
1355 EXPORT_SYMBOL(ipmi_destroy_user);
1357 int ipmi_get_version(struct ipmi_user *user,
1358 unsigned char *major,
1359 unsigned char *minor)
1361 struct ipmi_device_id id;
1362 int rv, index;
1364 user = acquire_ipmi_user(user, &index);
1365 if (!user)
1366 return -ENODEV;
1368 rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1369 if (!rv) {
1370 *major = ipmi_version_major(&id);
1371 *minor = ipmi_version_minor(&id);
1373 release_ipmi_user(user, index);
1375 return rv;
1377 EXPORT_SYMBOL(ipmi_get_version);
1379 int ipmi_set_my_address(struct ipmi_user *user,
1380 unsigned int channel,
1381 unsigned char address)
1383 int index, rv = 0;
1385 user = acquire_ipmi_user(user, &index);
1386 if (!user)
1387 return -ENODEV;
1389 if (channel >= IPMI_MAX_CHANNELS) {
1390 rv = -EINVAL;
1391 } else {
1392 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1393 user->intf->addrinfo[channel].address = address;
1395 release_ipmi_user(user, index);
1397 return rv;
1399 EXPORT_SYMBOL(ipmi_set_my_address);
1401 int ipmi_get_my_address(struct ipmi_user *user,
1402 unsigned int channel,
1403 unsigned char *address)
1405 int index, rv = 0;
1407 user = acquire_ipmi_user(user, &index);
1408 if (!user)
1409 return -ENODEV;
1411 if (channel >= IPMI_MAX_CHANNELS) {
1412 rv = -EINVAL;
1413 } else {
1414 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1415 *address = user->intf->addrinfo[channel].address;
1417 release_ipmi_user(user, index);
1419 return rv;
1421 EXPORT_SYMBOL(ipmi_get_my_address);
1423 int ipmi_set_my_LUN(struct ipmi_user *user,
1424 unsigned int channel,
1425 unsigned char LUN)
1427 int index, rv = 0;
1429 user = acquire_ipmi_user(user, &index);
1430 if (!user)
1431 return -ENODEV;
1433 if (channel >= IPMI_MAX_CHANNELS) {
1434 rv = -EINVAL;
1435 } else {
1436 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1437 user->intf->addrinfo[channel].lun = LUN & 0x3;
1439 release_ipmi_user(user, index);
1441 return rv;
1443 EXPORT_SYMBOL(ipmi_set_my_LUN);
1445 int ipmi_get_my_LUN(struct ipmi_user *user,
1446 unsigned int channel,
1447 unsigned char *address)
1449 int index, rv = 0;
1451 user = acquire_ipmi_user(user, &index);
1452 if (!user)
1453 return -ENODEV;
1455 if (channel >= IPMI_MAX_CHANNELS) {
1456 rv = -EINVAL;
1457 } else {
1458 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1459 *address = user->intf->addrinfo[channel].lun;
1461 release_ipmi_user(user, index);
1463 return rv;
1465 EXPORT_SYMBOL(ipmi_get_my_LUN);
1467 int ipmi_get_maintenance_mode(struct ipmi_user *user)
1469 int mode, index;
1470 unsigned long flags;
1472 user = acquire_ipmi_user(user, &index);
1473 if (!user)
1474 return -ENODEV;
1476 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1477 mode = user->intf->maintenance_mode;
1478 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1479 release_ipmi_user(user, index);
1481 return mode;
1483 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1485 static void maintenance_mode_update(struct ipmi_smi *intf)
1487 if (intf->handlers->set_maintenance_mode)
1488 intf->handlers->set_maintenance_mode(
1489 intf->send_info, intf->maintenance_mode_enable);
1492 int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1494 int rv = 0, index;
1495 unsigned long flags;
1496 struct ipmi_smi *intf = user->intf;
1498 user = acquire_ipmi_user(user, &index);
1499 if (!user)
1500 return -ENODEV;
1502 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1503 if (intf->maintenance_mode != mode) {
1504 switch (mode) {
1505 case IPMI_MAINTENANCE_MODE_AUTO:
1506 intf->maintenance_mode_enable
1507 = (intf->auto_maintenance_timeout > 0);
1508 break;
1510 case IPMI_MAINTENANCE_MODE_OFF:
1511 intf->maintenance_mode_enable = false;
1512 break;
1514 case IPMI_MAINTENANCE_MODE_ON:
1515 intf->maintenance_mode_enable = true;
1516 break;
1518 default:
1519 rv = -EINVAL;
1520 goto out_unlock;
1522 intf->maintenance_mode = mode;
1524 maintenance_mode_update(intf);
1526 out_unlock:
1527 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1528 release_ipmi_user(user, index);
1530 return rv;
1532 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1534 int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1536 unsigned long flags;
1537 struct ipmi_smi *intf = user->intf;
1538 struct ipmi_recv_msg *msg, *msg2;
1539 struct list_head msgs;
1540 int index;
1542 user = acquire_ipmi_user(user, &index);
1543 if (!user)
1544 return -ENODEV;
1546 INIT_LIST_HEAD(&msgs);
1548 spin_lock_irqsave(&intf->events_lock, flags);
1549 if (user->gets_events == val)
1550 goto out;
1552 user->gets_events = val;
1554 if (val) {
1555 if (atomic_inc_return(&intf->event_waiters) == 1)
1556 need_waiter(intf);
1557 } else {
1558 atomic_dec(&intf->event_waiters);
1561 if (intf->delivering_events)
1563 * Another thread is delivering events for this, so
1564 * let it handle any new events.
1566 goto out;
1568 /* Deliver any queued events. */
1569 while (user->gets_events && !list_empty(&intf->waiting_events)) {
1570 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1571 list_move_tail(&msg->link, &msgs);
1572 intf->waiting_events_count = 0;
1573 if (intf->event_msg_printed) {
1574 dev_warn(intf->si_dev, "Event queue no longer full\n");
1575 intf->event_msg_printed = 0;
1578 intf->delivering_events = 1;
1579 spin_unlock_irqrestore(&intf->events_lock, flags);
1581 list_for_each_entry_safe(msg, msg2, &msgs, link) {
1582 msg->user = user;
1583 kref_get(&user->refcount);
1584 deliver_local_response(intf, msg);
1587 spin_lock_irqsave(&intf->events_lock, flags);
1588 intf->delivering_events = 0;
1591 out:
1592 spin_unlock_irqrestore(&intf->events_lock, flags);
1593 release_ipmi_user(user, index);
1595 return 0;
1597 EXPORT_SYMBOL(ipmi_set_gets_events);
1599 static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1600 unsigned char netfn,
1601 unsigned char cmd,
1602 unsigned char chan)
1604 struct cmd_rcvr *rcvr;
1606 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1607 lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1608 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1609 && (rcvr->chans & (1 << chan)))
1610 return rcvr;
1612 return NULL;
1615 static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1616 unsigned char netfn,
1617 unsigned char cmd,
1618 unsigned int chans)
1620 struct cmd_rcvr *rcvr;
1622 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
1623 lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
1624 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1625 && (rcvr->chans & chans))
1626 return 0;
1628 return 1;
1631 int ipmi_register_for_cmd(struct ipmi_user *user,
1632 unsigned char netfn,
1633 unsigned char cmd,
1634 unsigned int chans)
1636 struct ipmi_smi *intf = user->intf;
1637 struct cmd_rcvr *rcvr;
1638 int rv = 0, index;
1640 user = acquire_ipmi_user(user, &index);
1641 if (!user)
1642 return -ENODEV;
1644 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1645 if (!rcvr) {
1646 rv = -ENOMEM;
1647 goto out_release;
1649 rcvr->cmd = cmd;
1650 rcvr->netfn = netfn;
1651 rcvr->chans = chans;
1652 rcvr->user = user;
1654 mutex_lock(&intf->cmd_rcvrs_mutex);
1655 /* Make sure the command/netfn is not already registered. */
1656 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1657 rv = -EBUSY;
1658 goto out_unlock;
1661 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1663 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1665 out_unlock:
1666 mutex_unlock(&intf->cmd_rcvrs_mutex);
1667 if (rv)
1668 kfree(rcvr);
1669 out_release:
1670 release_ipmi_user(user, index);
1672 return rv;
1674 EXPORT_SYMBOL(ipmi_register_for_cmd);
1676 int ipmi_unregister_for_cmd(struct ipmi_user *user,
1677 unsigned char netfn,
1678 unsigned char cmd,
1679 unsigned int chans)
1681 struct ipmi_smi *intf = user->intf;
1682 struct cmd_rcvr *rcvr;
1683 struct cmd_rcvr *rcvrs = NULL;
1684 int i, rv = -ENOENT, index;
1686 user = acquire_ipmi_user(user, &index);
1687 if (!user)
1688 return -ENODEV;
1690 mutex_lock(&intf->cmd_rcvrs_mutex);
1691 for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1692 if (((1 << i) & chans) == 0)
1693 continue;
1694 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1695 if (rcvr == NULL)
1696 continue;
1697 if (rcvr->user == user) {
1698 rv = 0;
1699 rcvr->chans &= ~chans;
1700 if (rcvr->chans == 0) {
1701 list_del_rcu(&rcvr->link);
1702 rcvr->next = rcvrs;
1703 rcvrs = rcvr;
1707 mutex_unlock(&intf->cmd_rcvrs_mutex);
1708 synchronize_rcu();
1709 release_ipmi_user(user, index);
1710 while (rcvrs) {
1711 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1712 rcvr = rcvrs;
1713 rcvrs = rcvr->next;
1714 kfree(rcvr);
1717 return rv;
1719 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
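/*
 * IPMB checksum: the two's complement of the modulo-256 sum of the
 * covered bytes, so the covered bytes plus the checksum always sum to
 * zero (mod 256), which is how receivers verify it.
 */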
1721 static unsigned char
1722 ipmb_checksum(unsigned char *data, int size)
1724 unsigned char csum = 0;
1726 for (; size > 0; size--, data++)
1727 csum += *data;
1729 return -csum;
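/*
 * Resulting layout of the Send Message-encapsulated IPMB request built
 * below, for the non-broadcast case (broadcast prepends a zero byte at
 * data[3] and shifts the rest up by one):
 *
 *	data[0] netfn (App request) / BMC LUN
 *	data[1] Send Message command
 *	data[2] channel
 *	data[3] rsSA    data[4] netfn/rsLUN    data[5] checksum 1
 *	data[6] rqSA    data[7] rqSeq/rqLUN    data[8] cmd
 *	data[9]... payload, followed by checksum 2
 */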
1732 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1733 struct kernel_ipmi_msg *msg,
1734 struct ipmi_ipmb_addr *ipmb_addr,
1735 long msgid,
1736 unsigned char ipmb_seq,
1737 int broadcast,
1738 unsigned char source_address,
1739 unsigned char source_lun)
1741 int i = broadcast;
1743 /* Format the IPMB header data. */
1744 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1745 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1746 smi_msg->data[2] = ipmb_addr->channel;
1747 if (broadcast)
1748 smi_msg->data[3] = 0;
1749 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1750 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1751 smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1752 smi_msg->data[i+6] = source_address;
1753 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1754 smi_msg->data[i+8] = msg->cmd;
1756 /* Now tack on the data to the message. */
1757 if (msg->data_len > 0)
1758 memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1759 smi_msg->data_size = msg->data_len + 9;
1761 /* Now calculate the checksum and tack it on. */
1762 smi_msg->data[i+smi_msg->data_size]
1763 = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1766 * Add on the checksum size and the offset from the
1767 * broadcast.
1769 smi_msg->data_size += 1 + i;
1771 smi_msg->msgid = msgid;
1774 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1775 struct kernel_ipmi_msg *msg,
1776 struct ipmi_lan_addr *lan_addr,
1777 long msgid,
1778 unsigned char ipmb_seq,
1779 unsigned char source_lun)
1781 /* Format the IPMB header data. */
1782 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1783 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1784 smi_msg->data[2] = lan_addr->channel;
1785 smi_msg->data[3] = lan_addr->session_handle;
1786 smi_msg->data[4] = lan_addr->remote_SWID;
1787 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1788 smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1789 smi_msg->data[7] = lan_addr->local_SWID;
1790 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1791 smi_msg->data[9] = msg->cmd;
1793 /* Now tack on the data to the message. */
1794 if (msg->data_len > 0)
1795 memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1796 smi_msg->data_size = msg->data_len + 10;
1798 /* Now calculate the checksum and tack it on. */
1799 smi_msg->data[smi_msg->data_size]
1800 = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1803 * Add on the checksum size and the offset from the
1804 * broadcast.
1806 smi_msg->data_size += 1;
1808 smi_msg->msgid = msgid;
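/*
 * Queue a message for transmission.  If a message is already in
 * flight, the new one is queued (on the high-priority list if
 * priority > 0) and NULL is returned; otherwise it becomes curr_msg
 * and is returned so the caller can hand it to the lower layer.
 */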
1811 static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1812 struct ipmi_smi_msg *smi_msg,
1813 int priority)
1815 if (intf->curr_msg) {
1816 if (priority > 0)
1817 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1818 else
1819 list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1820 smi_msg = NULL;
1821 } else {
1822 intf->curr_msg = smi_msg;
1825 return smi_msg;
1828 static void smi_send(struct ipmi_smi *intf,
1829 const struct ipmi_smi_handlers *handlers,
1830 struct ipmi_smi_msg *smi_msg, int priority)
1832 int run_to_completion = intf->run_to_completion;
1833 unsigned long flags = 0;
1835 if (!run_to_completion)
1836 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1837 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1839 if (!run_to_completion)
1840 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1842 if (smi_msg)
1843 handlers->sender(intf->send_info, smi_msg);
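/*
 * Cold reset, warm reset, and any firmware-netfn request put the
 * interface into maintenance mode, which relaxes timeouts while the
 * BMC may be unresponsive.
 */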
1846 static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1848 return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1849 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1850 || (msg->cmd == IPMI_WARM_RESET_CMD)))
1851 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1854 static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
1855 struct ipmi_addr *addr,
1856 long msgid,
1857 struct kernel_ipmi_msg *msg,
1858 struct ipmi_smi_msg *smi_msg,
1859 struct ipmi_recv_msg *recv_msg,
1860 int retries,
1861 unsigned int retry_time_ms)
1863 struct ipmi_system_interface_addr *smi_addr;
1865 if (msg->netfn & 1)
1866 /* Responses are not allowed to the SMI. */
1867 return -EINVAL;
1869 smi_addr = (struct ipmi_system_interface_addr *) addr;
1870 if (smi_addr->lun > 3) {
1871 ipmi_inc_stat(intf, sent_invalid_commands);
1872 return -EINVAL;
1875 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1877 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1878 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1879 || (msg->cmd == IPMI_GET_MSG_CMD)
1880 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1882 * We don't let the user do these, since we manage
1883 * the sequence numbers.
1885 ipmi_inc_stat(intf, sent_invalid_commands);
1886 return -EINVAL;
1889 if (is_maintenance_mode_cmd(msg)) {
1890 unsigned long flags;
1892 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1893 intf->auto_maintenance_timeout
1894 = maintenance_mode_timeout_ms;
1895 if (!intf->maintenance_mode
1896 && !intf->maintenance_mode_enable) {
1897 intf->maintenance_mode_enable = true;
1898 maintenance_mode_update(intf);
1900 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1901 flags);
1904 if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1905 ipmi_inc_stat(intf, sent_invalid_commands);
1906 return -EMSGSIZE;
1909 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1910 smi_msg->data[1] = msg->cmd;
1911 smi_msg->msgid = msgid;
1912 smi_msg->user_data = recv_msg;
1913 if (msg->data_len > 0)
1914 memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1915 smi_msg->data_size = msg->data_len + 2;
1916 ipmi_inc_stat(intf, sent_local_commands);
1918 return 0;
1921 static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
1922 struct ipmi_addr *addr,
1923 long msgid,
1924 struct kernel_ipmi_msg *msg,
1925 struct ipmi_smi_msg *smi_msg,
1926 struct ipmi_recv_msg *recv_msg,
1927 unsigned char source_address,
1928 unsigned char source_lun,
1929 int retries,
1930 unsigned int retry_time_ms)
1932 struct ipmi_ipmb_addr *ipmb_addr;
1933 unsigned char ipmb_seq;
1934 long seqid;
1935 int broadcast = 0;
1936 struct ipmi_channel *chans;
1937 int rv = 0;
1939 if (addr->channel >= IPMI_MAX_CHANNELS) {
1940 ipmi_inc_stat(intf, sent_invalid_commands);
1941 return -EINVAL;
1944 chans = READ_ONCE(intf->channel_list)->c;
1946 if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1947 ipmi_inc_stat(intf, sent_invalid_commands);
1948 return -EINVAL;
1951 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1953 * Broadcasts add a zero at the beginning of the
1954 * message, but otherwise is the same as an IPMB
1955 * address.
1957 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1958 broadcast = 1;
1959 retries = 0; /* Don't retry broadcasts. */
1963 * 9 for the header and 1 for the checksum, plus
1964 * possibly one for the broadcast.
1966 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1967 ipmi_inc_stat(intf, sent_invalid_commands);
1968 return -EMSGSIZE;
1971 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1972 if (ipmb_addr->lun > 3) {
1973 ipmi_inc_stat(intf, sent_invalid_commands);
1974 return -EINVAL;
1977 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1979 if (recv_msg->msg.netfn & 0x1) {
1981 * It's a response, so use the user's sequence
1982 * from msgid.
1984 ipmi_inc_stat(intf, sent_ipmb_responses);
1985 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1986 msgid, broadcast,
1987 source_address, source_lun);
1990 * Save the receive message so we can use it
1991 * to deliver the response.
1993 smi_msg->user_data = recv_msg;
1994 } else {
1995 /* It's a command, so get a sequence for it. */
1996 unsigned long flags;
1998 spin_lock_irqsave(&intf->seq_lock, flags);
2000 if (is_maintenance_mode_cmd(msg))
2001 intf->ipmb_maintenance_mode_timeout =
2002 maintenance_mode_timeout_ms;
2004 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
2005 /* Different default in maintenance mode */
2006 retry_time_ms = default_maintenance_retry_ms;
2009 * Create a sequence number with the given timeout and
2010 * retry count (by default, 2 seconds and 4 retries).
2012 rv = intf_next_seq(intf,
2013 recv_msg,
2014 retry_time_ms,
2015 retries,
2016 broadcast,
2017 &ipmb_seq,
2018 &seqid);
2019 if (rv)
2021 * We have used up all the sequence numbers,
2022 * probably, so abort.
2024 goto out_err;
2026 ipmi_inc_stat(intf, sent_ipmb_commands);
2029 * Store the sequence number in the message,
2030 * so that when the send message response
2031 * comes back we can start the timer.
2033 format_ipmb_msg(smi_msg, msg, ipmb_addr,
2034 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2035 ipmb_seq, broadcast,
2036 source_address, source_lun);
2039 * Copy the message into the recv message data, so we
2040 * can retransmit it later if necessary.
2042 memcpy(recv_msg->msg_data, smi_msg->data,
2043 smi_msg->data_size);
2044 recv_msg->msg.data = recv_msg->msg_data;
2045 recv_msg->msg.data_len = smi_msg->data_size;
2048 * We don't unlock until here, because we need
2049 * to copy the completed message into the
2050 * recv_msg before we release the lock.
2051 * Otherwise, race conditions may bite us. I
2052 * know that's pretty paranoid, but I prefer
2053 * to be correct.
2055 out_err:
2056 spin_unlock_irqrestore(&intf->seq_lock, flags);
2059 return rv;
2062 static int i_ipmi_req_lan(struct ipmi_smi *intf,
2063 struct ipmi_addr *addr,
2064 long msgid,
2065 struct kernel_ipmi_msg *msg,
2066 struct ipmi_smi_msg *smi_msg,
2067 struct ipmi_recv_msg *recv_msg,
2068 unsigned char source_lun,
2069 int retries,
2070 unsigned int retry_time_ms)
2072 struct ipmi_lan_addr *lan_addr;
2073 unsigned char ipmb_seq;
2074 long seqid;
2075 struct ipmi_channel *chans;
2076 int rv = 0;
2078 if (addr->channel >= IPMI_MAX_CHANNELS) {
2079 ipmi_inc_stat(intf, sent_invalid_commands);
2080 return -EINVAL;
2083 chans = READ_ONCE(intf->channel_list)->c;
2085 if ((chans[addr->channel].medium
2086 != IPMI_CHANNEL_MEDIUM_8023LAN)
2087 && (chans[addr->channel].medium
2088 != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2089 ipmi_inc_stat(intf, sent_invalid_commands);
2090 return -EINVAL;
2093 /* 11 for the header and 1 for the checksum. */
2094 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2095 ipmi_inc_stat(intf, sent_invalid_commands);
2096 return -EMSGSIZE;
2099 lan_addr = (struct ipmi_lan_addr *) addr;
2100 if (lan_addr->lun > 3) {
2101 ipmi_inc_stat(intf, sent_invalid_commands);
2102 return -EINVAL;
2105 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2107 if (recv_msg->msg.netfn & 0x1) {
2109 * It's a response, so use the user's sequence
2110 * from msgid.
2112 ipmi_inc_stat(intf, sent_lan_responses);
2113 format_lan_msg(smi_msg, msg, lan_addr, msgid,
2114 msgid, source_lun);
2117 * Save the receive message so we can use it
2118 * to deliver the response.
2120 smi_msg->user_data = recv_msg;
2121 } else {
2122 /* It's a command, so get a sequence for it. */
2123 unsigned long flags;
2125 spin_lock_irqsave(&intf->seq_lock, flags);
2128 * Create a sequence number with the given timeout and
2129 * retry count (by default, 2 seconds and 4 retries).
2131 rv = intf_next_seq(intf,
2132 recv_msg,
2133 retry_time_ms,
2134 retries,
2135 0,
2136 &ipmb_seq,
2137 &seqid);
2138 if (rv)
2140 * We have used up all the sequence numbers,
2141 * probably, so abort.
2143 goto out_err;
2145 ipmi_inc_stat(intf, sent_lan_commands);
2148 * Store the sequence number in the message,
2149 * so that when the send message response
2150 * comes back we can start the timer.
2152 format_lan_msg(smi_msg, msg, lan_addr,
2153 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2154 ipmb_seq, source_lun);
2157 * Copy the message into the recv message data, so we
2158 * can retransmit it later if necessary.
2160 memcpy(recv_msg->msg_data, smi_msg->data,
2161 smi_msg->data_size);
2162 recv_msg->msg.data = recv_msg->msg_data;
2163 recv_msg->msg.data_len = smi_msg->data_size;
2166 * We don't unlock until here, because we need
2167 * to copy the completed message into the
2168 * recv_msg before we release the lock.
2169 * Otherwise, race conditions may bite us. I
2170 * know that's pretty paranoid, but I prefer
2171 * to be correct.
2173 out_err:
2174 spin_unlock_irqrestore(&intf->seq_lock, flags);
2177 return rv;
2181 * Separate from ipmi_request so that the user does not have to be
2182 * supplied in certain circumstances (mainly at panic time). If
2183 * messages are supplied, they will be freed, even if an error
2184 * occurs.
2186 static int i_ipmi_request(struct ipmi_user *user,
2187 struct ipmi_smi *intf,
2188 struct ipmi_addr *addr,
2189 long msgid,
2190 struct kernel_ipmi_msg *msg,
2191 void *user_msg_data,
2192 void *supplied_smi,
2193 struct ipmi_recv_msg *supplied_recv,
2194 int priority,
2195 unsigned char source_address,
2196 unsigned char source_lun,
2197 int retries,
2198 unsigned int retry_time_ms)
2200 struct ipmi_smi_msg *smi_msg;
2201 struct ipmi_recv_msg *recv_msg;
2202 int rv = 0;
2204 if (supplied_recv)
2205 recv_msg = supplied_recv;
2206 else {
2207 recv_msg = ipmi_alloc_recv_msg();
2208 if (recv_msg == NULL) {
2209 rv = -ENOMEM;
2210 goto out;
2213 recv_msg->user_msg_data = user_msg_data;
2215 if (supplied_smi)
2216 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
2217 else {
2218 smi_msg = ipmi_alloc_smi_msg();
2219 if (smi_msg == NULL) {
2220 if (!supplied_recv)
2221 ipmi_free_recv_msg(recv_msg);
2222 rv = -ENOMEM;
2223 goto out;
2227 rcu_read_lock();
2228 if (intf->in_shutdown) {
2229 rv = -ENODEV;
2230 goto out_err;
2233 recv_msg->user = user;
2234 if (user)
2235 /* The put happens when the message is freed. */
2236 kref_get(&user->refcount);
2237 recv_msg->msgid = msgid;
2239 * Store the message to send in the receive message so timeout
2240 * responses can get the proper response data.
2242 recv_msg->msg = *msg;
2244 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2245 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2246 recv_msg, retries, retry_time_ms);
2247 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2248 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2249 source_address, source_lun,
2250 retries, retry_time_ms);
2251 } else if (is_lan_addr(addr)) {
2252 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2253 source_lun, retries, retry_time_ms);
2254 } else {
2255 /* Unknown address type. */
2256 ipmi_inc_stat(intf, sent_invalid_commands);
2257 rv = -EINVAL;
2260 if (rv) {
2261 out_err:
2262 ipmi_free_smi_msg(smi_msg);
2263 ipmi_free_recv_msg(recv_msg);
2264 } else {
2265 pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);
2267 smi_send(intf, intf->handlers, smi_msg, priority);
2269 rcu_read_unlock();
2271 out:
2272 return rv;
2275 static int check_addr(struct ipmi_smi *intf,
2276 struct ipmi_addr *addr,
2277 unsigned char *saddr,
2278 unsigned char *lun)
2280 if (addr->channel >= IPMI_MAX_CHANNELS)
2281 return -EINVAL;
2282 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2283 *lun = intf->addrinfo[addr->channel].lun;
2284 *saddr = intf->addrinfo[addr->channel].address;
2285 return 0;
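/*
 * check_addr() above uses the standard Spectre-v1 hardening idiom from
 * <linux/nospec.h>: bounds-check first, then clamp the index so a
 * mispredicted branch cannot speculatively index past the array.  The
 * idiom in general form (table name illustrative):
 *
 *	if (idx >= ARRAY_SIZE(table))
 *		return -EINVAL;
 *	idx = array_index_nospec(idx, ARRAY_SIZE(table));
 *	val = table[idx];
 */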
2288 int ipmi_request_settime(struct ipmi_user *user,
2289 struct ipmi_addr *addr,
2290 long msgid,
2291 struct kernel_ipmi_msg *msg,
2292 void *user_msg_data,
2293 int priority,
2294 int retries,
2295 unsigned int retry_time_ms)
2297 unsigned char saddr = 0, lun = 0;
2298 int rv, index;
2300 if (!user)
2301 return -EINVAL;
2303 user = acquire_ipmi_user(user, &index);
2304 if (!user)
2305 return -ENODEV;
2307 rv = check_addr(user->intf, addr, &saddr, &lun);
2308 if (!rv)
2309 rv = i_ipmi_request(user,
2310 user->intf,
2311 addr,
2312 msgid,
2313 msg,
2314 user_msg_data,
2315 NULL, NULL,
2316 priority,
2317 saddr,
2318 lun,
2319 retries,
2320 retry_time_ms);
2322 release_ipmi_user(user, index);
2323 return rv;
2325 EXPORT_SYMBOL(ipmi_request_settime);
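/*
 * A minimal caller sketch for ipmi_request_settime(), sending Get
 * Device ID to the local BMC over the system interface.  The function
 * name is hypothetical; the user handle would come from
 * ipmi_create_user():
 *
 *	static int example_get_device_id(struct ipmi_user *user)
 *	{
 *		struct ipmi_system_interface_addr si = {
 *			.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *			.channel = IPMI_BMC_CHANNEL,
 *			.lun = 0,
 *		};
 *		struct kernel_ipmi_msg msg = {
 *			.netfn = IPMI_NETFN_APP_REQUEST,
 *			.cmd = IPMI_GET_DEVICE_ID_CMD,
 *			.data = NULL,
 *			.data_len = 0,
 *		};
 *
 *		// retries = -1 and retry_time_ms = 0 select the defaults.
 *		return ipmi_request_settime(user, (struct ipmi_addr *)&si,
 *					    0, &msg, NULL, 0, -1, 0);
 *	}
 */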
2327 int ipmi_request_supply_msgs(struct ipmi_user *user,
2328 struct ipmi_addr *addr,
2329 long msgid,
2330 struct kernel_ipmi_msg *msg,
2331 void *user_msg_data,
2332 void *supplied_smi,
2333 struct ipmi_recv_msg *supplied_recv,
2334 int priority)
2336 unsigned char saddr = 0, lun = 0;
2337 int rv, index;
2339 if (!user)
2340 return -EINVAL;
2342 user = acquire_ipmi_user(user, &index);
2343 if (!user)
2344 return -ENODEV;
2346 rv = check_addr(user->intf, addr, &saddr, &lun);
2347 if (!rv)
2348 rv = i_ipmi_request(user,
2349 user->intf,
2350 addr,
2351 msgid,
2352 msg,
2353 user_msg_data,
2354 supplied_smi,
2355 supplied_recv,
2356 priority,
2357 saddr,
2358 lun,
2359 -1, 0);
2361 release_ipmi_user(user, index);
2362 return rv;
2364 EXPORT_SYMBOL(ipmi_request_supply_msgs);
2366 static void bmc_device_id_handler(struct ipmi_smi *intf,
2367 struct ipmi_recv_msg *msg)
2369 int rv;
2371 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2372 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2373 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2374 dev_warn(intf->si_dev,
2375 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2376 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2377 return;
2380 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2381 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2382 if (rv) {
2383 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2384 intf->bmc->dyn_id_set = 0;
2385 } else {
2387 * Make sure the id data is available before setting
2388 * dyn_id_set.
2390 smp_wmb();
2391 intf->bmc->dyn_id_set = 1;
2394 wake_up(&intf->waitq);
2397 static int
2398 send_get_device_id_cmd(struct ipmi_smi *intf)
2400 struct ipmi_system_interface_addr si;
2401 struct kernel_ipmi_msg msg;
2403 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2404 si.channel = IPMI_BMC_CHANNEL;
2405 si.lun = 0;
2407 msg.netfn = IPMI_NETFN_APP_REQUEST;
2408 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2409 msg.data = NULL;
2410 msg.data_len = 0;
2412 return i_ipmi_request(NULL,
2413 intf,
2414 (struct ipmi_addr *) &si,
2416 &msg,
2417 intf,
2418 NULL,
2419 NULL,
2421 intf->addrinfo[0].address,
2422 intf->addrinfo[0].lun,
2423 -1, 0);
2426 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2428 int rv;
2430 bmc->dyn_id_set = 2;
2432 intf->null_user_handler = bmc_device_id_handler;
2434 rv = send_get_device_id_cmd(intf);
2435 if (rv)
2436 return rv;
2438 wait_event(intf->waitq, bmc->dyn_id_set != 2);
2440 if (!bmc->dyn_id_set)
2441 rv = -EIO; /* Something went wrong in the fetch. */
2443 /* dyn_id_set makes the id data available. */
2444 smp_rmb();
2446 intf->null_user_handler = NULL;
2448 return rv;
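/*
 * The smp_rmb() above pairs with the smp_wmb() in
 * bmc_device_id_handler(): the handler publishes fetch_id before it
 * sets dyn_id_set, and the reader observes dyn_id_set before touching
 * fetch_id.  The generic pattern:
 *
 *	// writer (handler)             // reader (this function)
 *	bmc->fetch_id = ...;            wait_event(..., flag != 2);
 *	smp_wmb();                      smp_rmb();
 *	bmc->dyn_id_set = 1;            use(bmc->fetch_id);
 */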
2452 * Fetch the device id for the bmc/interface. You must pass in either
2453 * bmc or intf; this code will get the other one. If the data has
2454 * been recently fetched, this will just use the cached data. Otherwise
2455 * it will run a new fetch.
2457 * Except for the first time this is called (in ipmi_add_smi()),
2458 * this will always return good data.
2460 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2461 struct ipmi_device_id *id,
2462 bool *guid_set, guid_t *guid, int intf_num)
2464 int rv = 0;
2465 int prev_dyn_id_set, prev_guid_set;
2466 bool intf_set = intf != NULL;
2468 if (!intf) {
2469 mutex_lock(&bmc->dyn_mutex);
2470 retry_bmc_lock:
2471 if (list_empty(&bmc->intfs)) {
2472 mutex_unlock(&bmc->dyn_mutex);
2473 return -ENOENT;
2475 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2476 bmc_link);
2477 kref_get(&intf->refcount);
2478 mutex_unlock(&bmc->dyn_mutex);
2479 mutex_lock(&intf->bmc_reg_mutex);
2480 mutex_lock(&bmc->dyn_mutex);
2481 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2482 bmc_link)) {
2483 mutex_unlock(&intf->bmc_reg_mutex);
2484 kref_put(&intf->refcount, intf_free);
2485 goto retry_bmc_lock;
2487 } else {
2488 mutex_lock(&intf->bmc_reg_mutex);
2489 bmc = intf->bmc;
2490 mutex_lock(&bmc->dyn_mutex);
2491 kref_get(&intf->refcount);
2494 /* If we have a valid and current ID, just return that. */
2495 if (intf->in_bmc_register ||
2496 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2497 goto out_noprocessing;
2499 prev_guid_set = bmc->dyn_guid_set;
2500 __get_guid(intf);
2502 prev_dyn_id_set = bmc->dyn_id_set;
2503 rv = __get_device_id(intf, bmc);
2504 if (rv)
2505 goto out;
2508 * The guid, device id, manufacturer id, and product id should
2509 * not change on a BMC. If it does we have to do some dancing.
2511 if (!intf->bmc_registered
2512 || (!prev_guid_set && bmc->dyn_guid_set)
2513 || (!prev_dyn_id_set && bmc->dyn_id_set)
2514 || (prev_guid_set && bmc->dyn_guid_set
2515 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2516 || bmc->id.device_id != bmc->fetch_id.device_id
2517 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2518 || bmc->id.product_id != bmc->fetch_id.product_id) {
2519 struct ipmi_device_id id = bmc->fetch_id;
2520 int guid_set = bmc->dyn_guid_set;
2521 guid_t guid;
2523 guid = bmc->fetch_guid;
2524 mutex_unlock(&bmc->dyn_mutex);
2526 __ipmi_bmc_unregister(intf);
2527 /* Fill in the temporary BMC for good measure. */
2528 intf->bmc->id = id;
2529 intf->bmc->dyn_guid_set = guid_set;
2530 intf->bmc->guid = guid;
2531 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2532 need_waiter(intf); /* Retry later on an error. */
2533 else
2534 __scan_channels(intf, &id);
2537 if (!intf_set) {
2539 * We weren't given an interface to start from, so
2540 * restart the operation on the next interface for
2541 * the BMC.
2543 mutex_unlock(&intf->bmc_reg_mutex);
2544 mutex_lock(&bmc->dyn_mutex);
2545 goto retry_bmc_lock;
2548 /* We have a new BMC, set it up. */
2549 bmc = intf->bmc;
2550 mutex_lock(&bmc->dyn_mutex);
2551 goto out_noprocessing;
2552 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2553 /* Version info changed, scan the channels again. */
2554 __scan_channels(intf, &bmc->fetch_id);
2556 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2558 out:
2559 if (rv && prev_dyn_id_set) {
2560 rv = 0; /* Ignore failures if we have previous data. */
2561 bmc->dyn_id_set = prev_dyn_id_set;
2563 if (!rv) {
2564 bmc->id = bmc->fetch_id;
2565 if (bmc->dyn_guid_set)
2566 bmc->guid = bmc->fetch_guid;
2567 else if (prev_guid_set)
2569 * The guid used to be valid and it failed to fetch,
2570 * just use the cached value.
2572 bmc->dyn_guid_set = prev_guid_set;
2574 out_noprocessing:
2575 if (!rv) {
2576 if (id)
2577 *id = bmc->id;
2579 if (guid_set)
2580 *guid_set = bmc->dyn_guid_set;
2582 if (guid && bmc->dyn_guid_set)
2583 *guid = bmc->guid;
2586 mutex_unlock(&bmc->dyn_mutex);
2587 mutex_unlock(&intf->bmc_reg_mutex);
2589 kref_put(&intf->refcount, intf_free);
2590 return rv;
2593 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2594 struct ipmi_device_id *id,
2595 bool *guid_set, guid_t *guid)
2597 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2600 static ssize_t device_id_show(struct device *dev,
2601 struct device_attribute *attr,
2602 char *buf)
2604 struct bmc_device *bmc = to_bmc_device(dev);
2605 struct ipmi_device_id id;
2606 int rv;
2608 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2609 if (rv)
2610 return rv;
2612 return snprintf(buf, 10, "%u\n", id.device_id);
2614 static DEVICE_ATTR_RO(device_id);
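/*
 * DEVICE_ATTR_RO(device_id) binds device_id_show() to a read-only
 * sysfs attribute.  Roughly, the standard macro expands to (a sketch,
 * not copied from <linux/device.h>):
 *
 *	static struct device_attribute dev_attr_device_id = {
 *		.attr = { .name = "device_id", .mode = 0444 },
 *		.show = device_id_show,
 *	};
 *
 * The *_show() handlers below all follow the same pattern.
 */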
2616 static ssize_t provides_device_sdrs_show(struct device *dev,
2617 struct device_attribute *attr,
2618 char *buf)
2620 struct bmc_device *bmc = to_bmc_device(dev);
2621 struct ipmi_device_id id;
2622 int rv;
2624 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2625 if (rv)
2626 return rv;
2628 return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2630 static DEVICE_ATTR_RO(provides_device_sdrs);
2632 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2633 char *buf)
2635 struct bmc_device *bmc = to_bmc_device(dev);
2636 struct ipmi_device_id id;
2637 int rv;
2639 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2640 if (rv)
2641 return rv;
2643 return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2645 static DEVICE_ATTR_RO(revision);
2647 static ssize_t firmware_revision_show(struct device *dev,
2648 struct device_attribute *attr,
2649 char *buf)
2651 struct bmc_device *bmc = to_bmc_device(dev);
2652 struct ipmi_device_id id;
2653 int rv;
2655 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2656 if (rv)
2657 return rv;
2659 return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2660 id.firmware_revision_2);
2662 static DEVICE_ATTR_RO(firmware_revision);
2664 static ssize_t ipmi_version_show(struct device *dev,
2665 struct device_attribute *attr,
2666 char *buf)
2668 struct bmc_device *bmc = to_bmc_device(dev);
2669 struct ipmi_device_id id;
2670 int rv;
2672 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2673 if (rv)
2674 return rv;
2676 return snprintf(buf, 20, "%u.%u\n",
2677 ipmi_version_major(&id),
2678 ipmi_version_minor(&id));
2680 static DEVICE_ATTR_RO(ipmi_version);
2682 static ssize_t add_dev_support_show(struct device *dev,
2683 struct device_attribute *attr,
2684 char *buf)
2686 struct bmc_device *bmc = to_bmc_device(dev);
2687 struct ipmi_device_id id;
2688 int rv;
2690 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2691 if (rv)
2692 return rv;
2694 return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2696 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2697 NULL);
2699 static ssize_t manufacturer_id_show(struct device *dev,
2700 struct device_attribute *attr,
2701 char *buf)
2703 struct bmc_device *bmc = to_bmc_device(dev);
2704 struct ipmi_device_id id;
2705 int rv;
2707 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2708 if (rv)
2709 return rv;
2711 return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2713 static DEVICE_ATTR_RO(manufacturer_id);
2715 static ssize_t product_id_show(struct device *dev,
2716 struct device_attribute *attr,
2717 char *buf)
2719 struct bmc_device *bmc = to_bmc_device(dev);
2720 struct ipmi_device_id id;
2721 int rv;
2723 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2724 if (rv)
2725 return rv;
2727 return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2729 static DEVICE_ATTR_RO(product_id);
2731 static ssize_t aux_firmware_rev_show(struct device *dev,
2732 struct device_attribute *attr,
2733 char *buf)
2735 struct bmc_device *bmc = to_bmc_device(dev);
2736 struct ipmi_device_id id;
2737 int rv;
2739 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2740 if (rv)
2741 return rv;
2743 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2744 id.aux_firmware_revision[3],
2745 id.aux_firmware_revision[2],
2746 id.aux_firmware_revision[1],
2747 id.aux_firmware_revision[0]);
2749 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2751 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2752 char *buf)
2754 struct bmc_device *bmc = to_bmc_device(dev);
2755 bool guid_set;
2756 guid_t guid;
2757 int rv;
2759 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2760 if (rv)
2761 return rv;
2762 if (!guid_set)
2763 return -ENOENT;
2765 return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
2767 static DEVICE_ATTR_RO(guid);
2769 static struct attribute *bmc_dev_attrs[] = {
2770 &dev_attr_device_id.attr,
2771 &dev_attr_provides_device_sdrs.attr,
2772 &dev_attr_revision.attr,
2773 &dev_attr_firmware_revision.attr,
2774 &dev_attr_ipmi_version.attr,
2775 &dev_attr_additional_device_support.attr,
2776 &dev_attr_manufacturer_id.attr,
2777 &dev_attr_product_id.attr,
2778 &dev_attr_aux_firmware_revision.attr,
2779 &dev_attr_guid.attr,
2780 NULL
2783 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2784 struct attribute *attr, int idx)
2786 struct device *dev = kobj_to_dev(kobj);
2787 struct bmc_device *bmc = to_bmc_device(dev);
2788 umode_t mode = attr->mode;
2789 int rv;
2791 if (attr == &dev_attr_aux_firmware_revision.attr) {
2792 struct ipmi_device_id id;
2794 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2795 return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2797 if (attr == &dev_attr_guid.attr) {
2798 bool guid_set;
2800 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2801 return (!rv && guid_set) ? mode : 0;
2803 return mode;
2806 static const struct attribute_group bmc_dev_attr_group = {
2807 .attrs = bmc_dev_attrs,
2808 .is_visible = bmc_dev_attr_is_visible,
2811 static const struct attribute_group *bmc_dev_attr_groups[] = {
2812 &bmc_dev_attr_group,
2813 NULL
2816 static const struct device_type bmc_device_type = {
2817 .groups = bmc_dev_attr_groups,
2820 static int __find_bmc_guid(struct device *dev, const void *data)
2822 const guid_t *guid = data;
2823 struct bmc_device *bmc;
2824 int rv;
2826 if (dev->type != &bmc_device_type)
2827 return 0;
2829 bmc = to_bmc_device(dev);
2830 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2831 if (rv)
2832 rv = kref_get_unless_zero(&bmc->usecount);
2833 return rv;
2837 * Returns with the bmc's usecount incremented, if it is non-NULL.
2839 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2840 guid_t *guid)
2842 struct device *dev;
2843 struct bmc_device *bmc = NULL;
2845 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2846 if (dev) {
2847 bmc = to_bmc_device(dev);
2848 put_device(dev);
2850 return bmc;
2853 struct prod_dev_id {
2854 unsigned int product_id;
2855 unsigned char device_id;
2858 static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
2860 const struct prod_dev_id *cid = data;
2861 struct bmc_device *bmc;
2862 int rv;
2864 if (dev->type != &bmc_device_type)
2865 return 0;
2867 bmc = to_bmc_device(dev);
2868 rv = (bmc->id.product_id == cid->product_id
2869 && bmc->id.device_id == cid->device_id);
2870 if (rv)
2871 rv = kref_get_unless_zero(&bmc->usecount);
2872 return rv;
2876 * Returns with the bmc's usecount incremented, if it is non-NULL.
2878 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2879 struct device_driver *drv,
2880 unsigned int product_id, unsigned char device_id)
2882 struct prod_dev_id id = {
2883 .product_id = product_id,
2884 .device_id = device_id,
2886 struct device *dev;
2887 struct bmc_device *bmc = NULL;
2889 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2890 if (dev) {
2891 bmc = to_bmc_device(dev);
2892 put_device(dev);
2894 return bmc;
2897 static DEFINE_IDA(ipmi_bmc_ida);
2899 static void
2900 release_bmc_device(struct device *dev)
2902 kfree(to_bmc_device(dev));
2905 static void cleanup_bmc_work(struct work_struct *work)
2907 struct bmc_device *bmc = container_of(work, struct bmc_device,
2908 remove_work);
2909 int id = bmc->pdev.id; /* Unregister overwrites id */
2911 platform_device_unregister(&bmc->pdev);
2912 ida_simple_remove(&ipmi_bmc_ida, id);
2915 static void
2916 cleanup_bmc_device(struct kref *ref)
2918 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2921 * Remove the platform device in a work queue to avoid issues
2922 * with removing the device attributes while reading a device
2923 * attribute.
2925 schedule_work(&bmc->remove_work);
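/*
 * Shape of the kref-release-via-workqueue pattern used here: the final
 * kref_put() may run from a sysfs read of one of the bmc attributes,
 * and a device cannot be unregistered from inside its own attribute
 * handler without deadlocking, so the real teardown is deferred:
 *
 *	kref_put(&bmc->usecount, cleanup_bmc_device);
 *	    -> cleanup_bmc_device() only schedules remove_work
 *	    -> cleanup_bmc_work() later runs in process context and does
 *	       platform_device_unregister() + ida_simple_remove()
 */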
2929 * Must be called with intf->bmc_reg_mutex held.
2931 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2933 struct bmc_device *bmc = intf->bmc;
2935 if (!intf->bmc_registered)
2936 return;
2938 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2939 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2940 kfree(intf->my_dev_name);
2941 intf->my_dev_name = NULL;
2943 mutex_lock(&bmc->dyn_mutex);
2944 list_del(&intf->bmc_link);
2945 mutex_unlock(&bmc->dyn_mutex);
2946 intf->bmc = &intf->tmp_bmc;
2947 kref_put(&bmc->usecount, cleanup_bmc_device);
2948 intf->bmc_registered = false;
2951 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2953 mutex_lock(&intf->bmc_reg_mutex);
2954 __ipmi_bmc_unregister(intf);
2955 mutex_unlock(&intf->bmc_reg_mutex);
2959 * Must be called with intf->bmc_reg_mutex held.
2961 static int __ipmi_bmc_register(struct ipmi_smi *intf,
2962 struct ipmi_device_id *id,
2963 bool guid_set, guid_t *guid, int intf_num)
2965 int rv;
2966 struct bmc_device *bmc;
2967 struct bmc_device *old_bmc;
2970 * platform_device_register() can cause bmc_reg_mutex to
2971 * be claimed because of the is_visible functions of
2972 * the attributes. Eliminate possible recursion and
2973 * release the lock.
2975 intf->in_bmc_register = true;
2976 mutex_unlock(&intf->bmc_reg_mutex);
2979 * Try to find if there is a bmc_device struct already
2980 * representing the interfaced BMC.
2982 mutex_lock(&ipmidriver_mutex);
2983 if (guid_set)
2984 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
2985 else
2986 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2987 id->product_id,
2988 id->device_id);
2991 * If there is already a bmc_device for this BMC, use it;
2992 * otherwise allocate and register a new BMC device.
2994 if (old_bmc) {
2995 bmc = old_bmc;
2997 * Note: old_bmc already has usecount incremented by
2998 * the BMC find functions.
3000 intf->bmc = old_bmc;
3001 mutex_lock(&bmc->dyn_mutex);
3002 list_add_tail(&intf->bmc_link, &bmc->intfs);
3003 mutex_unlock(&bmc->dyn_mutex);
3005 dev_info(intf->si_dev,
3006 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3007 bmc->id.manufacturer_id,
3008 bmc->id.product_id,
3009 bmc->id.device_id);
3010 } else {
3011 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
3012 if (!bmc) {
3013 rv = -ENOMEM;
3014 goto out;
3016 INIT_LIST_HEAD(&bmc->intfs);
3017 mutex_init(&bmc->dyn_mutex);
3018 INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
3020 bmc->id = *id;
3021 bmc->dyn_id_set = 1;
3022 bmc->dyn_guid_set = guid_set;
3023 bmc->guid = *guid;
3024 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
3026 bmc->pdev.name = "ipmi_bmc";
3028 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
3029 if (rv < 0) {
3030 kfree(bmc);
3031 goto out;
3034 bmc->pdev.dev.driver = &ipmidriver.driver;
3035 bmc->pdev.id = rv;
3036 bmc->pdev.dev.release = release_bmc_device;
3037 bmc->pdev.dev.type = &bmc_device_type;
3038 kref_init(&bmc->usecount);
3040 intf->bmc = bmc;
3041 mutex_lock(&bmc->dyn_mutex);
3042 list_add_tail(&intf->bmc_link, &bmc->intfs);
3043 mutex_unlock(&bmc->dyn_mutex);
3045 rv = platform_device_register(&bmc->pdev);
3046 if (rv) {
3047 dev_err(intf->si_dev,
3048 "Unable to register bmc device: %d\n",
3049 rv);
3050 goto out_list_del;
3053 dev_info(intf->si_dev,
3054 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3055 bmc->id.manufacturer_id,
3056 bmc->id.product_id,
3057 bmc->id.device_id);
3061 * Create symlinks from the system interface device to the
3062 * bmc device and back.
3064 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3065 if (rv) {
3066 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3067 goto out_put_bmc;
3070 if (intf_num == -1)
3071 intf_num = intf->intf_num;
3072 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3073 if (!intf->my_dev_name) {
3074 rv = -ENOMEM;
3075 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3076 rv);
3077 goto out_unlink1;
3080 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3081 intf->my_dev_name);
3082 if (rv) {
3083 kfree(intf->my_dev_name);
3084 intf->my_dev_name = NULL;
3085 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3086 rv);
3087 goto out_free_my_dev_name;
3090 intf->bmc_registered = true;
3092 out:
3093 mutex_unlock(&ipmidriver_mutex);
3094 mutex_lock(&intf->bmc_reg_mutex);
3095 intf->in_bmc_register = false;
3096 return rv;
3099 out_free_my_dev_name:
3100 kfree(intf->my_dev_name);
3101 intf->my_dev_name = NULL;
3103 out_unlink1:
3104 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3106 out_put_bmc:
3107 mutex_lock(&bmc->dyn_mutex);
3108 list_del(&intf->bmc_link);
3109 mutex_unlock(&bmc->dyn_mutex);
3110 intf->bmc = &intf->tmp_bmc;
3111 kref_put(&bmc->usecount, cleanup_bmc_device);
3112 goto out;
3114 out_list_del:
3115 mutex_lock(&bmc->dyn_mutex);
3116 list_del(&intf->bmc_link);
3117 mutex_unlock(&bmc->dyn_mutex);
3118 intf->bmc = &intf->tmp_bmc;
3119 put_device(&bmc->pdev.dev);
3120 goto out;
3123 static int
3124 send_guid_cmd(struct ipmi_smi *intf, int chan)
3126 struct kernel_ipmi_msg msg;
3127 struct ipmi_system_interface_addr si;
3129 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3130 si.channel = IPMI_BMC_CHANNEL;
3131 si.lun = 0;
3133 msg.netfn = IPMI_NETFN_APP_REQUEST;
3134 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3135 msg.data = NULL;
3136 msg.data_len = 0;
3137 return i_ipmi_request(NULL,
3138 intf,
3139 (struct ipmi_addr *) &si,
3141 &msg,
3142 intf,
3143 NULL,
3144 NULL,
3146 intf->addrinfo[0].address,
3147 intf->addrinfo[0].lun,
3148 -1, 0);
3151 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3153 struct bmc_device *bmc = intf->bmc;
3155 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3156 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3157 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3158 /* Not for me */
3159 return;
3161 if (msg->msg.data[0] != 0) {
3162 /* Error from getting the GUID, the BMC doesn't have one. */
3163 bmc->dyn_guid_set = 0;
3164 goto out;
3167 if (msg->msg.data_len < UUID_SIZE + 1) {
3168 bmc->dyn_guid_set = 0;
3169 dev_warn(intf->si_dev,
3170 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n",
3171 msg->msg.data_len, UUID_SIZE + 1);
3172 goto out;
3175 import_guid(&bmc->fetch_guid, msg->msg.data + 1);
3177 * Make sure the guid data is available before setting
3178 * dyn_guid_set.
3180 smp_wmb();
3181 bmc->dyn_guid_set = 1;
3182 out:
3183 wake_up(&intf->waitq);
3186 static void __get_guid(struct ipmi_smi *intf)
3188 int rv;
3189 struct bmc_device *bmc = intf->bmc;
3191 bmc->dyn_guid_set = 2;
3192 intf->null_user_handler = guid_handler;
3193 rv = send_guid_cmd(intf, 0);
3194 if (rv)
3195 /* Send failed, no GUID available. */
3196 bmc->dyn_guid_set = 0;
3197 else
3198 wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3200 /* dyn_guid_set makes the guid data available. */
3201 smp_rmb();
3203 intf->null_user_handler = NULL;
3206 static int
3207 send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3209 struct kernel_ipmi_msg msg;
3210 unsigned char data[1];
3211 struct ipmi_system_interface_addr si;
3213 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3214 si.channel = IPMI_BMC_CHANNEL;
3215 si.lun = 0;
3217 msg.netfn = IPMI_NETFN_APP_REQUEST;
3218 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3219 msg.data = data;
3220 msg.data_len = 1;
3221 data[0] = chan;
3222 return i_ipmi_request(NULL,
3223 intf,
3224 (struct ipmi_addr *) &si,
3226 &msg,
3227 intf,
3228 NULL,
3229 NULL,
3231 intf->addrinfo[0].address,
3232 intf->addrinfo[0].lun,
3233 -1, 0);
3236 static void
3237 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3239 int rv = 0;
3240 int ch;
3241 unsigned int set = intf->curr_working_cset;
3242 struct ipmi_channel *chans;
3244 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3245 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3246 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3247 /* It's the one we want */
3248 if (msg->msg.data[0] != 0) {
3249 /* Got an error from the channel, just go on. */
3251 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3253 * If the MC does not support this
3254 * command, that is legal. We just
3255 * assume it has a single IPMB channel at
3256 * channel zero.
3258 intf->wchannels[set].c[0].medium
3259 = IPMI_CHANNEL_MEDIUM_IPMB;
3260 intf->wchannels[set].c[0].protocol
3261 = IPMI_CHANNEL_PROTOCOL_IPMB;
3263 intf->channel_list = intf->wchannels + set;
3264 intf->channels_ready = true;
3265 wake_up(&intf->waitq);
3266 goto out;
3268 goto next_channel;
3270 if (msg->msg.data_len < 4) {
3271 /* Message not big enough, just go on. */
3272 goto next_channel;
3274 ch = intf->curr_channel;
3275 chans = intf->wchannels[set].c;
3276 chans[ch].medium = msg->msg.data[2] & 0x7f;
3277 chans[ch].protocol = msg->msg.data[3] & 0x1f;
3279 next_channel:
3280 intf->curr_channel++;
3281 if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3282 intf->channel_list = intf->wchannels + set;
3283 intf->channels_ready = true;
3284 wake_up(&intf->waitq);
3285 } else {
3286 intf->channel_list = intf->wchannels + set;
3287 intf->channels_ready = true;
3288 rv = send_channel_info_cmd(intf, intf->curr_channel);
3291 if (rv) {
3292 /* Got an error somehow, just give up. */
3293 dev_warn(intf->si_dev,
3294 "Error sending channel information for channel %d: %d\n",
3295 intf->curr_channel, rv);
3297 intf->channel_list = intf->wchannels + set;
3298 intf->channels_ready = true;
3299 wake_up(&intf->waitq);
3302 out:
3303 return;
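/*
 * Layout of the Get Channel Info response data consumed above (IPMI
 * 2.0 spec, "Get Channel Info Command"):
 *
 *	data[0]  completion code
 *	data[1]  channel number
 *	data[2]  bits 6:0 - channel medium type   (masked with 0x7f)
 *	data[3]  bits 4:0 - channel protocol type (masked with 0x1f)
 */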
3307 * Must be holding intf->bmc_reg_mutex to call this.
3309 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3311 int rv;
3313 if (ipmi_version_major(id) > 1
3314 || (ipmi_version_major(id) == 1
3315 && ipmi_version_minor(id) >= 5)) {
3316 unsigned int set;
3319 * Start scanning the channels to see what is
3320 * available.
3322 set = !intf->curr_working_cset;
3323 intf->curr_working_cset = set;
3324 memset(&intf->wchannels[set], 0,
3325 sizeof(struct ipmi_channel_set));
3327 intf->null_user_handler = channel_handler;
3328 intf->curr_channel = 0;
3329 rv = send_channel_info_cmd(intf, 0);
3330 if (rv) {
3331 dev_warn(intf->si_dev,
3332 "Error sending channel information for channel 0, %d\n",
3333 rv);
3334 return -EIO;
3337 /* Wait for the channel info to be read. */
3338 wait_event(intf->waitq, intf->channels_ready);
3339 intf->null_user_handler = NULL;
3340 } else {
3341 unsigned int set = intf->curr_working_cset;
3343 /* Assume a single IPMB channel at zero. */
3344 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3345 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3346 intf->channel_list = intf->wchannels + set;
3347 intf->channels_ready = true;
3350 return 0;
3353 static void ipmi_poll(struct ipmi_smi *intf)
3355 if (intf->handlers->poll)
3356 intf->handlers->poll(intf->send_info);
3357 /* In case something came in */
3358 handle_new_recv_msgs(intf);
3361 void ipmi_poll_interface(struct ipmi_user *user)
3363 ipmi_poll(user->intf);
3365 EXPORT_SYMBOL(ipmi_poll_interface);
3367 static void redo_bmc_reg(struct work_struct *work)
3369 struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3370 bmc_reg_work);
3372 if (!intf->in_shutdown)
3373 bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3375 kref_put(&intf->refcount, intf_free);
3378 int ipmi_add_smi(struct module *owner,
3379 const struct ipmi_smi_handlers *handlers,
3380 void *send_info,
3381 struct device *si_dev,
3382 unsigned char slave_addr)
3384 int i, j;
3385 int rv;
3386 struct ipmi_smi *intf, *tintf;
3387 struct list_head *link;
3388 struct ipmi_device_id id;
3391 * Make sure the driver is actually initialized, this handles
3392 * problems with initialization order.
3394 rv = ipmi_init_msghandler();
3395 if (rv)
3396 return rv;
3398 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3399 if (!intf)
3400 return -ENOMEM;
3402 rv = init_srcu_struct(&intf->users_srcu);
3403 if (rv) {
3404 kfree(intf);
3405 return rv;
3408 intf->owner = owner;
3409 intf->bmc = &intf->tmp_bmc;
3410 INIT_LIST_HEAD(&intf->bmc->intfs);
3411 mutex_init(&intf->bmc->dyn_mutex);
3412 INIT_LIST_HEAD(&intf->bmc_link);
3413 mutex_init(&intf->bmc_reg_mutex);
3414 intf->intf_num = -1; /* Mark it invalid for now. */
3415 kref_init(&intf->refcount);
3416 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3417 intf->si_dev = si_dev;
3418 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3419 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3420 intf->addrinfo[j].lun = 2;
3422 if (slave_addr != 0)
3423 intf->addrinfo[0].address = slave_addr;
3424 INIT_LIST_HEAD(&intf->users);
3425 intf->handlers = handlers;
3426 intf->send_info = send_info;
3427 spin_lock_init(&intf->seq_lock);
3428 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3429 intf->seq_table[j].inuse = 0;
3430 intf->seq_table[j].seqid = 0;
3432 intf->curr_seq = 0;
3433 spin_lock_init(&intf->waiting_rcv_msgs_lock);
3434 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3435 tasklet_init(&intf->recv_tasklet,
3436 smi_recv_tasklet,
3437 (unsigned long) intf);
3438 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3439 spin_lock_init(&intf->xmit_msgs_lock);
3440 INIT_LIST_HEAD(&intf->xmit_msgs);
3441 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3442 spin_lock_init(&intf->events_lock);
3443 spin_lock_init(&intf->watch_lock);
3444 atomic_set(&intf->event_waiters, 0);
3445 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3446 INIT_LIST_HEAD(&intf->waiting_events);
3447 intf->waiting_events_count = 0;
3448 mutex_init(&intf->cmd_rcvrs_mutex);
3449 spin_lock_init(&intf->maintenance_mode_lock);
3450 INIT_LIST_HEAD(&intf->cmd_rcvrs);
3451 init_waitqueue_head(&intf->waitq);
3452 for (i = 0; i < IPMI_NUM_STATS; i++)
3453 atomic_set(&intf->stats[i], 0);
3455 mutex_lock(&ipmi_interfaces_mutex);
3456 /* Look for a hole in the numbers. */
3457 i = 0;
3458 link = &ipmi_interfaces;
3459 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
3460 ipmi_interfaces_mutex_held()) {
3461 if (tintf->intf_num != i) {
3462 link = &tintf->link;
3463 break;
3465 i++;
3467 /* Add the new interface in numeric order. */
3468 if (i == 0)
3469 list_add_rcu(&intf->link, &ipmi_interfaces);
3470 else
3471 list_add_tail_rcu(&intf->link, link);
3473 rv = handlers->start_processing(send_info, intf);
3474 if (rv)
3475 goto out_err;
3477 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3478 if (rv) {
3479 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3480 goto out_err_started;
3483 mutex_lock(&intf->bmc_reg_mutex);
3484 rv = __scan_channels(intf, &id);
3485 mutex_unlock(&intf->bmc_reg_mutex);
3486 if (rv)
3487 goto out_err_bmc_reg;
3490 * Keep memory order straight for RCU readers. Make
3491 * sure everything else is committed to memory before
3492 * setting intf_num to mark the interface valid.
3494 smp_wmb();
3495 intf->intf_num = i;
3496 mutex_unlock(&ipmi_interfaces_mutex);
3498 /* After this point the interface is legal to use. */
3499 call_smi_watchers(i, intf->si_dev);
3501 return 0;
3503 out_err_bmc_reg:
3504 ipmi_bmc_unregister(intf);
3505 out_err_started:
3506 if (intf->handlers->shutdown)
3507 intf->handlers->shutdown(intf->send_info);
3508 out_err:
3509 list_del_rcu(&intf->link);
3510 mutex_unlock(&ipmi_interfaces_mutex);
3511 synchronize_srcu(&ipmi_interfaces_srcu);
3512 cleanup_srcu_struct(&intf->users_srcu);
3513 kref_put(&intf->refcount, intf_free);
3515 return rv;
3517 EXPORT_SYMBOL(ipmi_add_smi);
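/*
 * A registration sketch for a low-level interface driver.  Only
 * start_processing, shutdown, and poll are used in this file; the
 * other fields shown are recalled from ipmi_smi.h and should be
 * checked against that header.  All example_* names are hypothetical:
 *
 *	static const struct ipmi_smi_handlers example_handlers = {
 *		.owner            = THIS_MODULE,
 *		.start_processing = example_start,
 *		.sender           = example_sender,
 *		.poll             = example_poll,
 *		.shutdown         = example_shutdown,
 *	};
 *
 *	rv = ipmi_add_smi(THIS_MODULE, &example_handlers, my_info,
 *			  parent_dev, 0x20);  // 0x20 = BMC slave addr
 */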
3519 static void deliver_smi_err_response(struct ipmi_smi *intf,
3520 struct ipmi_smi_msg *msg,
3521 unsigned char err)
3523 msg->rsp[0] = msg->data[0] | 4;
3524 msg->rsp[1] = msg->data[1];
3525 msg->rsp[2] = err;
3526 msg->rsp_size = 3;
3527 /* It's an error, so it will never requeue, no need to check return. */
3528 handle_one_recv_msg(intf, msg);
3531 static void cleanup_smi_msgs(struct ipmi_smi *intf)
3533 int i;
3534 struct seq_table *ent;
3535 struct ipmi_smi_msg *msg;
3536 struct list_head *entry;
3537 struct list_head tmplist;
3539 /* Clear out our transmit queues and hold the messages. */
3540 INIT_LIST_HEAD(&tmplist);
3541 list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3542 list_splice_tail(&intf->xmit_msgs, &tmplist);
3544 /* Current message first, to preserve order */
3545 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3546 /* Wait for the message to clear out. */
3547 schedule_timeout(1);
3550 /* No need for locks, the interface is down. */
3553 * Return errors for all pending messages in queue and in the
3554 * tables waiting for remote responses.
3556 while (!list_empty(&tmplist)) {
3557 entry = tmplist.next;
3558 list_del(entry);
3559 msg = list_entry(entry, struct ipmi_smi_msg, link);
3560 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3563 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3564 ent = &intf->seq_table[i];
3565 if (!ent->inuse)
3566 continue;
3567 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3571 void ipmi_unregister_smi(struct ipmi_smi *intf)
3573 struct ipmi_smi_watcher *w;
3574 int intf_num = intf->intf_num, index;
3576 mutex_lock(&ipmi_interfaces_mutex);
3577 intf->intf_num = -1;
3578 intf->in_shutdown = true;
3579 list_del_rcu(&intf->link);
3580 mutex_unlock(&ipmi_interfaces_mutex);
3581 synchronize_srcu(&ipmi_interfaces_srcu);
3583 /* At this point no users can be added to the interface. */
3586 * Call all the watcher interfaces to tell them that
3587 * an interface is going away.
3589 mutex_lock(&smi_watchers_mutex);
3590 list_for_each_entry(w, &smi_watchers, link)
3591 w->smi_gone(intf_num);
3592 mutex_unlock(&smi_watchers_mutex);
3594 index = srcu_read_lock(&intf->users_srcu);
3595 while (!list_empty(&intf->users)) {
3596 struct ipmi_user *user =
3597 container_of(list_next_rcu(&intf->users),
3598 struct ipmi_user, link);
3600 _ipmi_destroy_user(user);
3602 srcu_read_unlock(&intf->users_srcu, index);
3604 if (intf->handlers->shutdown)
3605 intf->handlers->shutdown(intf->send_info);
3607 cleanup_smi_msgs(intf);
3609 ipmi_bmc_unregister(intf);
3611 cleanup_srcu_struct(&intf->users_srcu);
3612 kref_put(&intf->refcount, intf_free);
3614 EXPORT_SYMBOL(ipmi_unregister_smi);
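/*
 * Teardown order above, for reference: mark the interface dead and
 * unlink it so no new users can attach, synchronize SRCU, notify the
 * watchers, destroy any remaining users, shut down the low-level
 * driver, error out the queued messages, unregister the BMC device,
 * and finally drop the interface reference.
 */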
3616 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3617 struct ipmi_smi_msg *msg)
3619 struct ipmi_ipmb_addr ipmb_addr;
3620 struct ipmi_recv_msg *recv_msg;
3623 * This is 11, not 10, because the response must contain a
3624 * completion code.
3626 if (msg->rsp_size < 11) {
3627 /* Message not big enough, just ignore it. */
3628 ipmi_inc_stat(intf, invalid_ipmb_responses);
3629 return 0;
3632 if (msg->rsp[2] != 0) {
3633 /* An error getting the response, just ignore it. */
3634 return 0;
3637 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3638 ipmb_addr.slave_addr = msg->rsp[6];
3639 ipmb_addr.channel = msg->rsp[3] & 0x0f;
3640 ipmb_addr.lun = msg->rsp[7] & 3;
3643 * It's a response from a remote entity. Look up the sequence
3644 * number and handle the response.
3646 if (intf_find_seq(intf,
3647 msg->rsp[7] >> 2,
3648 msg->rsp[3] & 0x0f,
3649 msg->rsp[8],
3650 (msg->rsp[4] >> 2) & (~1),
3651 (struct ipmi_addr *) &ipmb_addr,
3652 &recv_msg)) {
3654 * We were unable to find the sequence number,
3655 * so just nuke the message.
3657 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3658 return 0;
3661 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3663 * The other fields matched, so no need to set them, except
3664 * for netfn, which needs to be the response that was
3665 * returned, not the request value.
3667 recv_msg->msg.netfn = msg->rsp[4] >> 2;
3668 recv_msg->msg.data = recv_msg->msg_data;
3669 recv_msg->msg.data_len = msg->rsp_size - 10;
3670 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3671 if (deliver_response(intf, recv_msg))
3672 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3673 else
3674 ipmi_inc_stat(intf, handled_ipmb_responses);
3676 return 0;
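/*
 * Offsets into msg->rsp used above, i.e. a Get Message response
 * carrying an encapsulated IPMB response (per the standard IPMB
 * framing):
 *
 *	rsp[0]    netfn/LUN of the Get Message response
 *	rsp[1]    IPMI_GET_MSG_CMD
 *	rsp[2]    completion code
 *	rsp[3]    low nibble: channel
 *	rsp[4]    encapsulated netfn (>> 2) and rqLUN
 *	rsp[5]    IPMB header checksum
 *	rsp[6]    responder slave address
 *	rsp[7]    rqSeq (>> 2) and responder LUN (& 3)
 *	rsp[8]    command
 *	rsp[9..]  response data, with a trailing checksum as the
 *	          last byte
 */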
3679 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3680 struct ipmi_smi_msg *msg)
3682 struct cmd_rcvr *rcvr;
3683 int rv = 0;
3684 unsigned char netfn;
3685 unsigned char cmd;
3686 unsigned char chan;
3687 struct ipmi_user *user = NULL;
3688 struct ipmi_ipmb_addr *ipmb_addr;
3689 struct ipmi_recv_msg *recv_msg;
3691 if (msg->rsp_size < 10) {
3692 /* Message not big enough, just ignore it. */
3693 ipmi_inc_stat(intf, invalid_commands);
3694 return 0;
3697 if (msg->rsp[2] != 0) {
3698 /* An error getting the response, just ignore it. */
3699 return 0;
3702 netfn = msg->rsp[4] >> 2;
3703 cmd = msg->rsp[8];
3704 chan = msg->rsp[3] & 0xf;
3706 rcu_read_lock();
3707 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3708 if (rcvr) {
3709 user = rcvr->user;
3710 kref_get(&user->refcount);
3711 } else
3712 user = NULL;
3713 rcu_read_unlock();
3715 if (user == NULL) {
3716 /* We didn't find a user, deliver an error response. */
3717 ipmi_inc_stat(intf, unhandled_commands);
3719 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3720 msg->data[1] = IPMI_SEND_MSG_CMD;
3721 msg->data[2] = msg->rsp[3];
3722 msg->data[3] = msg->rsp[6];
3723 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3724 msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3725 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3726 /* rqseq/lun */
3727 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3728 msg->data[8] = msg->rsp[8]; /* cmd */
3729 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3730 msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3731 msg->data_size = 11;
3733 pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);
3735 rcu_read_lock();
3736 if (!intf->in_shutdown) {
3737 smi_send(intf, intf->handlers, msg, 0);
3739 * We used the message, so return the value
3740 * that causes it to not be freed or
3741 * queued.
3743 rv = -1;
3745 rcu_read_unlock();
3746 } else {
3747 recv_msg = ipmi_alloc_recv_msg();
3748 if (!recv_msg) {
3750 * We couldn't allocate memory for the
3751 * message, so requeue it for handling
3752 * later.
3754 rv = 1;
3755 kref_put(&user->refcount, free_user);
3756 } else {
3757 /* Extract the source address from the data. */
3758 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3759 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3760 ipmb_addr->slave_addr = msg->rsp[6];
3761 ipmb_addr->lun = msg->rsp[7] & 3;
3762 ipmb_addr->channel = msg->rsp[3] & 0xf;
3765 * Extract the rest of the message information
3766 * from the IPMB header.
3768 recv_msg->user = user;
3769 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3770 recv_msg->msgid = msg->rsp[7] >> 2;
3771 recv_msg->msg.netfn = msg->rsp[4] >> 2;
3772 recv_msg->msg.cmd = msg->rsp[8];
3773 recv_msg->msg.data = recv_msg->msg_data;
3776 * We chop off 10, not 9 bytes because the checksum
3777 * at the end also needs to be removed.
3779 recv_msg->msg.data_len = msg->rsp_size - 10;
3780 memcpy(recv_msg->msg_data, &msg->rsp[9],
3781 msg->rsp_size - 10);
3782 if (deliver_response(intf, recv_msg))
3783 ipmi_inc_stat(intf, unhandled_commands);
3784 else
3785 ipmi_inc_stat(intf, handled_commands);
3789 return rv;
3792 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3793 struct ipmi_smi_msg *msg)
3795 struct ipmi_lan_addr lan_addr;
3796 struct ipmi_recv_msg *recv_msg;
3800 * This is 13, not 12, because the response must contain a
3801 * completion code.
3803 if (msg->rsp_size < 13) {
3804 /* Message not big enough, just ignore it. */
3805 ipmi_inc_stat(intf, invalid_lan_responses);
3806 return 0;
3809 if (msg->rsp[2] != 0) {
3810 /* An error getting the response, just ignore it. */
3811 return 0;
3814 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3815 lan_addr.session_handle = msg->rsp[4];
3816 lan_addr.remote_SWID = msg->rsp[8];
3817 lan_addr.local_SWID = msg->rsp[5];
3818 lan_addr.channel = msg->rsp[3] & 0x0f;
3819 lan_addr.privilege = msg->rsp[3] >> 4;
3820 lan_addr.lun = msg->rsp[9] & 3;
3823 * It's a response from a remote entity. Look up the sequence
3824 * number and handle the response.
3826 if (intf_find_seq(intf,
3827 msg->rsp[9] >> 2,
3828 msg->rsp[3] & 0x0f,
3829 msg->rsp[10],
3830 (msg->rsp[6] >> 2) & (~1),
3831 (struct ipmi_addr *) &lan_addr,
3832 &recv_msg)) {
3834 * We were unable to find the sequence number,
3835 * so just nuke the message.
3837 ipmi_inc_stat(intf, unhandled_lan_responses);
3838 return 0;
3841 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
3843 * The other fields matched, so no need to set them, except
3844 * for netfn, which needs to be the response that was
3845 * returned, not the request value.
3847 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3848 recv_msg->msg.data = recv_msg->msg_data;
3849 recv_msg->msg.data_len = msg->rsp_size - 12;
3850 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3851 if (deliver_response(intf, recv_msg))
3852 ipmi_inc_stat(intf, unhandled_lan_responses);
3853 else
3854 ipmi_inc_stat(intf, handled_lan_responses);
3856 return 0;
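/*
 * Offsets into msg->rsp used above for a LAN-encapsulated response,
 * matching the fields extracted into lan_addr:
 *
 *	rsp[2]     completion code
 *	rsp[3]     high nibble: privilege, low nibble: channel
 *	rsp[4]     session handle
 *	rsp[5]     local SWID
 *	rsp[6]     encapsulated netfn (>> 2)
 *	rsp[8]     remote SWID
 *	rsp[9]     sequence (>> 2) and LUN (& 3)
 *	rsp[10]    command
 *	rsp[11..]  response data, with a trailing checksum as the
 *	           last byte
 */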
3859 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3860 struct ipmi_smi_msg *msg)
3862 struct cmd_rcvr *rcvr;
3863 int rv = 0;
3864 unsigned char netfn;
3865 unsigned char cmd;
3866 unsigned char chan;
3867 struct ipmi_user *user = NULL;
3868 struct ipmi_lan_addr *lan_addr;
3869 struct ipmi_recv_msg *recv_msg;
3871 if (msg->rsp_size < 12) {
3872 /* Message not big enough, just ignore it. */
3873 ipmi_inc_stat(intf, invalid_commands);
3874 return 0;
3877 if (msg->rsp[2] != 0) {
3878 /* An error getting the response, just ignore it. */
3879 return 0;
3882 netfn = msg->rsp[6] >> 2;
3883 cmd = msg->rsp[10];
3884 chan = msg->rsp[3] & 0xf;
3886 rcu_read_lock();
3887 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3888 if (rcvr) {
3889 user = rcvr->user;
3890 kref_get(&user->refcount);
3891 } else
3892 user = NULL;
3893 rcu_read_unlock();
3895 if (user == NULL) {
3896 /* We didn't find a user, just give up. */
3897 ipmi_inc_stat(intf, unhandled_commands);
3900 * Don't do anything with these messages, just allow
3901 * them to be freed.
3903 rv = 0;
3904 } else {
3905 recv_msg = ipmi_alloc_recv_msg();
3906 if (!recv_msg) {
3908 * We couldn't allocate memory for the
3909 * message, so requeue it for handling later.
3911 rv = 1;
3912 kref_put(&user->refcount, free_user);
3913 } else {
3914 /* Extract the source address from the data. */
3915 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3916 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3917 lan_addr->session_handle = msg->rsp[4];
3918 lan_addr->remote_SWID = msg->rsp[8];
3919 lan_addr->local_SWID = msg->rsp[5];
3920 lan_addr->lun = msg->rsp[9] & 3;
3921 lan_addr->channel = msg->rsp[3] & 0xf;
3922 lan_addr->privilege = msg->rsp[3] >> 4;
3925 * Extract the rest of the message information
3926 * from the IPMB header.
3928 recv_msg->user = user;
3929 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3930 recv_msg->msgid = msg->rsp[9] >> 2;
3931 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3932 recv_msg->msg.cmd = msg->rsp[10];
3933 recv_msg->msg.data = recv_msg->msg_data;
3936 * We chop off 12, not 11 bytes because the checksum
3937 * at the end also needs to be removed.
3939 recv_msg->msg.data_len = msg->rsp_size - 12;
3940 memcpy(recv_msg->msg_data, &msg->rsp[11],
3941 msg->rsp_size - 12);
3942 if (deliver_response(intf, recv_msg))
3943 ipmi_inc_stat(intf, unhandled_commands);
3944 else
3945 ipmi_inc_stat(intf, handled_commands);
3949 return rv;
3953 * This routine handles "Get Message" command responses for
3954 * channels that use an OEM Medium. The message format belongs to
3955 * the OEM. See IPMI 2.0 specification, Chapter 6 and
3956 * Chapter 22, sections 22.6 and 22.24 for more details.
3958 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
3959 struct ipmi_smi_msg *msg)
3961 struct cmd_rcvr *rcvr;
3962 int rv = 0;
3963 unsigned char netfn;
3964 unsigned char cmd;
3965 unsigned char chan;
3966 struct ipmi_user *user = NULL;
3967 struct ipmi_system_interface_addr *smi_addr;
3968 struct ipmi_recv_msg *recv_msg;
3971 * We expect the OEM SW to perform error checking,
3972 * so we just do some basic sanity checks.
3974 if (msg->rsp_size < 4) {
3975 /* Message not big enough, just ignore it. */
3976 ipmi_inc_stat(intf, invalid_commands);
3977 return 0;
3980 if (msg->rsp[2] != 0) {
3981 /* An error getting the response, just ignore it. */
3982 return 0;
3986 * This is an OEM message, so the OEM needs to know how
3987 * to handle it. We do no interpretation here.
3989 netfn = msg->rsp[0] >> 2;
3990 cmd = msg->rsp[1];
3991 chan = msg->rsp[3] & 0xf;
3993 rcu_read_lock();
3994 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3995 if (rcvr) {
3996 user = rcvr->user;
3997 kref_get(&user->refcount);
3998 } else
3999 user = NULL;
4000 rcu_read_unlock();
4002 if (user == NULL) {
4003 /* We didn't find a user, just give up. */
4004 ipmi_inc_stat(intf, unhandled_commands);
4007 * Don't do anything with these messages, just allow
4008 * them to be freed.
4011 rv = 0;
4012 } else {
4013 recv_msg = ipmi_alloc_recv_msg();
4014 if (!recv_msg) {
4016 * We couldn't allocate memory for the
4017 * message, so requeue it for handling
4018 * later.
4020 rv = 1;
4021 kref_put(&user->refcount, free_user);
4022 } else {
4024 * OEM Messages are expected to be delivered via
4025 * the system interface to SMS software. We might
4026 * need to revisit this, depending on OEM
4027 * requirements.
4029 smi_addr = ((struct ipmi_system_interface_addr *)
4030 &recv_msg->addr);
4031 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4032 smi_addr->channel = IPMI_BMC_CHANNEL;
4033 smi_addr->lun = msg->rsp[0] & 3;
4035 recv_msg->user = user;
4036 recv_msg->user_msg_data = NULL;
4037 recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4038 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4039 recv_msg->msg.cmd = msg->rsp[1];
4040 recv_msg->msg.data = recv_msg->msg_data;
4043 * The message starts at byte 4, which follows the
4044 * Channel Byte in the "GET MESSAGE" command.
4046 recv_msg->msg.data_len = msg->rsp_size - 4;
4047 memcpy(recv_msg->msg_data, &msg->rsp[4],
4048 msg->rsp_size - 4);
4049 if (deliver_response(intf, recv_msg))
4050 ipmi_inc_stat(intf, unhandled_commands);
4051 else
4052 ipmi_inc_stat(intf, handled_commands);
4056 return rv;
4059 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4060 struct ipmi_smi_msg *msg)
4062 struct ipmi_system_interface_addr *smi_addr;
4064 recv_msg->msgid = 0;
4065 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4066 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4067 smi_addr->channel = IPMI_BMC_CHANNEL;
4068 smi_addr->lun = msg->rsp[0] & 3;
4069 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4070 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4071 recv_msg->msg.cmd = msg->rsp[1];
4072 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4073 recv_msg->msg.data = recv_msg->msg_data;
4074 recv_msg->msg.data_len = msg->rsp_size - 3;
4077 static int handle_read_event_rsp(struct ipmi_smi *intf,
4078 struct ipmi_smi_msg *msg)
4080 struct ipmi_recv_msg *recv_msg, *recv_msg2;
4081 struct list_head msgs;
4082 struct ipmi_user *user;
4083 int rv = 0, deliver_count = 0, index;
4084 unsigned long flags;
4086 if (msg->rsp_size < 19) {
4087 /* Message is too small to be an IPMB event. */
4088 ipmi_inc_stat(intf, invalid_events);
4089 return 0;
4092 if (msg->rsp[2] != 0) {
4093 /* An error getting the event, just ignore it. */
4094 return 0;
4097 INIT_LIST_HEAD(&msgs);
4099 spin_lock_irqsave(&intf->events_lock, flags);
4101 ipmi_inc_stat(intf, events);
4104 * Allocate and fill in one message for every user that is
4105 * getting events.
4107 index = srcu_read_lock(&intf->users_srcu);
4108 list_for_each_entry_rcu(user, &intf->users, link) {
4109 if (!user->gets_events)
4110 continue;
4112 recv_msg = ipmi_alloc_recv_msg();
4113 if (!recv_msg) {
4114 rcu_read_unlock();
4115 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4116 link) {
4117 list_del(&recv_msg->link);
4118 ipmi_free_recv_msg(recv_msg);
4121 * We couldn't allocate memory for the
4122 * message, so requeue it for handling
4123 * later.
4125 rv = 1;
4126 goto out;
4129 deliver_count++;
4131 copy_event_into_recv_msg(recv_msg, msg);
4132 recv_msg->user = user;
4133 kref_get(&user->refcount);
4134 list_add_tail(&recv_msg->link, &msgs);
4136 srcu_read_unlock(&intf->users_srcu, index);
4138 if (deliver_count) {
4139 /* Now deliver all the messages. */
4140 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4141 list_del(&recv_msg->link);
4142 deliver_local_response(intf, recv_msg);
4144 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4146 * No one to receive the message; queue it unless there are
4147 * already too many things in the queue.
4149 recv_msg = ipmi_alloc_recv_msg();
4150 if (!recv_msg) {
4152 * We couldn't allocate memory for the
4153 * message, so requeue it for handling
4154 * later.
4156 rv = 1;
4157 goto out;
4160 copy_event_into_recv_msg(recv_msg, msg);
4161 list_add_tail(&recv_msg->link, &intf->waiting_events);
4162 intf->waiting_events_count++;
4163 } else if (!intf->event_msg_printed) {
4165 * There are too many things in the queue, discard this
4166 * message.
4168 dev_warn(intf->si_dev,
4169 "Event queue full, discarding incoming events\n");
4170 intf->event_msg_printed = 1;
4173 out:
4174 spin_unlock_irqrestore(&intf->events_lock, flags);
4176 return rv;
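/*
 * Only users that opted in get a copy of each event.  A client
 * typically enables delivery right after creating its user handle;
 * a minimal sketch (names hypothetical, error handling elided):
 *
 *	rv = ipmi_create_user(if_num, &my_hndlrs, my_data, &user);
 *	if (!rv)
 *		ipmi_set_gets_events(user, true);
 */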
4179 static int handle_bmc_rsp(struct ipmi_smi *intf,
4180 struct ipmi_smi_msg *msg)
4182 struct ipmi_recv_msg *recv_msg;
4183 struct ipmi_system_interface_addr *smi_addr;
4185 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4186 if (recv_msg == NULL) {
4187 dev_warn(intf->si_dev,
4188 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
4189 return 0;
4192 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4193 recv_msg->msgid = msg->msgid;
4194 smi_addr = ((struct ipmi_system_interface_addr *)
4195 &recv_msg->addr);
4196 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4197 smi_addr->channel = IPMI_BMC_CHANNEL;
4198 smi_addr->lun = msg->rsp[0] & 3;
4199 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4200 recv_msg->msg.cmd = msg->rsp[1];
4201 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4202 recv_msg->msg.data = recv_msg->msg_data;
4203 recv_msg->msg.data_len = msg->rsp_size - 2;
4204 deliver_local_response(intf, recv_msg);
4206 return 0;
4210 * Handle a received message. Return 1 if the message should be requeued,
4211 * 0 if the message should be freed, or -1 if the message should not
4212 * be freed or requeued.
4214 static int handle_one_recv_msg(struct ipmi_smi *intf,
4215 struct ipmi_smi_msg *msg)
4217 int requeue;
4218 int chan;
4220 pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
4222 if ((msg->data_size >= 2)
4223 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4224 && (msg->data[1] == IPMI_SEND_MSG_CMD)
4225 && (msg->user_data == NULL)) {
4227 if (intf->in_shutdown)
4228 goto free_msg;
4231 * This is the local response to a command send; start
4232 * the timer for these. The user_data will not be
4233 * NULL if this is a response send, and we will let
4234 * response sends just go through.
4238 * Check for errors, if we get certain errors (ones
4239 * that mean basically we can try again later), we
4240 * ignore them and start the timer. Otherwise we
4241 * report the error immediately.
4243 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4244 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4245 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4246 && (msg->rsp[2] != IPMI_BUS_ERR)
4247 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4248 int ch = msg->rsp[3] & 0xf;
4249 struct ipmi_channel *chans;
4251 /* Got an error sending the message, handle it. */
4253 chans = READ_ONCE(intf->channel_list)->c;
4254 if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4255 || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4256 ipmi_inc_stat(intf, sent_lan_command_errs);
4257 else
4258 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4259 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4260 } else
4261 /* The message was sent, start the timer. */
4262 intf_start_seq_timer(intf, msg->msgid);
4263 free_msg:
4264 requeue = 0;
4265 goto out;
4267 } else if (msg->rsp_size < 2) {
4268 /* Message is too small to be correct. */
4269 dev_warn(intf->si_dev,
4270 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4271 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4273 /* Generate an error response for the message. */
4274 msg->rsp[0] = msg->data[0] | (1 << 2);
4275 msg->rsp[1] = msg->data[1];
4276 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4277 msg->rsp_size = 3;
4278 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4279 || (msg->rsp[1] != msg->data[1])) {
4281 * The NetFN and Command in the response is not even
4282 * marginally correct.
4284 dev_warn(intf->si_dev,
4285 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4286 (msg->data[0] >> 2) | 1, msg->data[1],
4287 msg->rsp[0] >> 2, msg->rsp[1]);
4289 /* Generate an error response for the message. */
4290 msg->rsp[0] = msg->data[0] | (1 << 2);
4291 msg->rsp[1] = msg->data[1];
4292 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4293 msg->rsp_size = 3;
4296 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4297 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4298 && (msg->user_data != NULL)) {
4300 * It's a response to a response we sent. For this we
4301 * deliver a send message response to the user.
4303 struct ipmi_recv_msg *recv_msg = msg->user_data;
4305 requeue = 0;
4306 if (msg->rsp_size < 2)
4307 /* Message is too small to be correct. */
4308 goto out;
4310 chan = msg->data[2] & 0x0f;
4311 if (chan >= IPMI_MAX_CHANNELS)
4312 /* Invalid channel number */
4313 goto out;
4315 if (!recv_msg)
4316 goto out;
4318 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4319 recv_msg->msg.data = recv_msg->msg_data;
4320 recv_msg->msg.data_len = 1;
4321 recv_msg->msg_data[0] = msg->rsp[2];
4322 deliver_local_response(intf, recv_msg);
4323 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4324 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4325 struct ipmi_channel *chans;
4327 /* It's from the receive queue. */
4328 chan = msg->rsp[3] & 0xf;
4329 if (chan >= IPMI_MAX_CHANNELS) {
4330 /* Invalid channel number */
4331 requeue = 0;
4332 goto out;
4336 * We need to make sure the channels have been initialized.
4337 * The channel_handler routine will set the "curr_channel"
4338 * equal to or greater than IPMI_MAX_CHANNELS when all the
4339 * channels for this interface have been initialized.
4341 if (!intf->channels_ready) {
4342 requeue = 0; /* Throw the message away */
4343 goto out;
4346 chans = READ_ONCE(intf->channel_list)->c;
4348 switch (chans[chan].medium) {
4349 case IPMI_CHANNEL_MEDIUM_IPMB:
4350 if (msg->rsp[4] & 0x04) {
4352 * It's a response, so find the
4353 * requesting message and send it up.
4355 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4356 } else {
4358 * It's a command to the SMS from some other
4359 * entity. Handle that.
4361 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4363 break;
4365 case IPMI_CHANNEL_MEDIUM_8023LAN:
4366 case IPMI_CHANNEL_MEDIUM_ASYNC:
4367 if (msg->rsp[6] & 0x04) {
4369 * It's a response, so find the
4370 * requesting message and send it up.
4372 requeue = handle_lan_get_msg_rsp(intf, msg);
4373 } else {
4375 * It's a command to the SMS from some other
4376 * entity. Handle that.
4378 requeue = handle_lan_get_msg_cmd(intf, msg);
4380 break;
4382 default:
4383 /* Check for OEM Channels. Clients had better
4384 register for these commands. */
4385 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4386 && (chans[chan].medium
4387 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4388 requeue = handle_oem_get_msg_cmd(intf, msg);
4389 } else {
4391 * We don't handle the channel type, so just
4392 * free the message.
4394 requeue = 0;
4398 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4399 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4400 /* It's an asynchronous event. */
4401 requeue = handle_read_event_rsp(intf, msg);
4402 } else {
4403 /* It's a response from the local BMC. */
4404 requeue = handle_bmc_rsp(intf, msg);
4407 out:
4408 return requeue;
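/*
 * Worked example of the dispatch above (illustrative values, not a
 * captured trace): a response fetched from an IPMB channel via the
 * Get Message command arrives as
 *
 *      rsp[0] = (IPMI_NETFN_APP_REQUEST|1) << 2   App response NetFN
 *      rsp[1] = IPMI_GET_MSG_CMD                  Get Message
 *      rsp[2] = completion code
 *      rsp[3] = channel number in the low nibble
 *      rsp[4] = remote NetFN/LUN byte; bit 2 set (odd NetFN) marks
 *               it as a response
 *
 * so handle_one_recv_msg() routes it to handle_ipmb_get_msg_rsp().
 */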
/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
    struct ipmi_smi_msg *smi_msg;
    unsigned long flags = 0;
    int rv;
    int run_to_completion = intf->run_to_completion;

    /* See if any waiting messages need to be processed. */
    if (!run_to_completion)
        spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
    while (!list_empty(&intf->waiting_rcv_msgs)) {
        smi_msg = list_entry(intf->waiting_rcv_msgs.next,
                             struct ipmi_smi_msg, link);
        list_del(&smi_msg->link);
        if (!run_to_completion)
            spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
                                   flags);
        rv = handle_one_recv_msg(intf, smi_msg);
        if (!run_to_completion)
            spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
        if (rv > 0) {
            /*
             * To preserve message order, quit if we
             * can't handle a message.  Add the message
             * back at the head, this is safe because this
             * tasklet is the only thing that pulls the
             * messages.
             */
            list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
            break;
        } else {
            if (rv == 0)
                /* Message handled */
                ipmi_free_smi_msg(smi_msg);
            /* If rv < 0, fatal error, del but don't free. */
        }
    }
    if (!run_to_completion)
        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);

    /*
     * If the pretimeout count is non-zero, decrement one from it and
     * deliver pretimeouts to all the users.
     */
    if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
        struct ipmi_user *user;
        int index;

        index = srcu_read_lock(&intf->users_srcu);
        list_for_each_entry_rcu(user, &intf->users, link) {
            if (user->handler->ipmi_watchdog_pretimeout)
                user->handler->ipmi_watchdog_pretimeout(
                    user->handler_data);
        }
        srcu_read_unlock(&intf->users_srcu, index);
    }
}
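/*
 * Note that waiting_rcv_msgs_lock is deliberately dropped around the
 * call to handle_one_recv_msg() above: delivering a message can call
 * back into user handlers, which must not run under this spinlock.
 * Dropping the lock is safe for ordering because, as the comment in
 * the loop says, the receive tasklet is the only consumer of the
 * queue, so putting an unhandled message back at the head restores
 * the original order exactly.
 */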
static void smi_recv_tasklet(unsigned long val)
{
    unsigned long flags = 0; /* keep us warning-free. */
    struct ipmi_smi *intf = (struct ipmi_smi *) val;
    int run_to_completion = intf->run_to_completion;
    struct ipmi_smi_msg *newmsg = NULL;

    /*
     * Start the next message if available.
     *
     * Do this here, not in the actual receiver, because we may deadlock
     * because the lower layer is allowed to hold locks while calling
     * message delivery.
     */

    rcu_read_lock();

    if (!run_to_completion)
        spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
    if (intf->curr_msg == NULL && !intf->in_shutdown) {
        struct list_head *entry = NULL;

        /* Pick the high priority queue first. */
        if (!list_empty(&intf->hp_xmit_msgs))
            entry = intf->hp_xmit_msgs.next;
        else if (!list_empty(&intf->xmit_msgs))
            entry = intf->xmit_msgs.next;

        if (entry) {
            list_del(entry);
            newmsg = list_entry(entry, struct ipmi_smi_msg, link);
            intf->curr_msg = newmsg;
        }
    }

    if (!run_to_completion)
        spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
    if (newmsg)
        intf->handlers->sender(intf->send_info, newmsg);

    rcu_read_unlock();

    handle_new_recv_msgs(intf);
}
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
                           struct ipmi_smi_msg *msg)
{
    unsigned long flags = 0; /* keep us warning-free. */
    int run_to_completion = intf->run_to_completion;

    /*
     * To preserve message order, we keep a queue and deliver from
     * a tasklet.
     */
    if (!run_to_completion)
        spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
    list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
    if (!run_to_completion)
        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
                               flags);

    if (!run_to_completion)
        spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
    /*
     * We can get an asynchronous event or receive message in addition
     * to commands we send.
     */
    if (msg == intf->curr_msg)
        intf->curr_msg = NULL;
    if (!run_to_completion)
        spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

    if (run_to_completion)
        smi_recv_tasklet((unsigned long) intf);
    else
        tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
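/*
 * Sketch of how a lower-layer (SMI) driver hands a completed message
 * back up through ipmi_smi_msg_received().  The driver type and field
 * names here are hypothetical; only ipmi_smi_msg_received() and the
 * ipmi_smi_msg fields are real:
 *
 *      static void my_si_complete(struct my_si *si, const u8 *buf,
 *                                 unsigned int len)
 *      {
 *              struct ipmi_smi_msg *msg = si->curr_msg;
 *
 *              si->curr_msg = NULL;
 *              msg->rsp_size = min(len, (unsigned int) IPMI_MAX_MSG_LENGTH);
 *              memcpy(msg->rsp, buf, msg->rsp_size);
 *              ipmi_smi_msg_received(si->intf, msg);
 *      }
 *
 * The lower layer may hold its own locks when calling in; that is why
 * the next queued message is started from the tasklet, as noted above.
 */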
void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
    if (intf->in_shutdown)
        return;

    atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
    tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
                  unsigned char seq, long seqid)
{
    struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();

    if (!smi_msg)
        /*
         * If we can't allocate the message, then just return, we
         * get 4 retries, so this should be ok.
         */
        return NULL;

    memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
    smi_msg->data_size = recv_msg->msg.data_len;
    smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

    pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);

    return smi_msg;
}
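/*
 * The msgid built above packs the sequence-table slot and the
 * per-slot sequence id into a single value (see STORE_SEQ_IN_MSGID()
 * and its GET_SEQ_FROM_MSGID()/GET_SEQID_FROM_MSGID() counterparts
 * defined earlier in this file), so when the retransmitted message
 * completes, the response can be matched back to the same seq_table
 * entry for completion or another retry.
 */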
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
                              struct list_head *timeouts,
                              unsigned long timeout_period,
                              int slot, unsigned long *flags,
                              bool *need_timer)
{
    struct ipmi_recv_msg *msg;

    if (intf->in_shutdown)
        return;

    if (!ent->inuse)
        return;

    if (timeout_period < ent->timeout) {
        ent->timeout -= timeout_period;
        *need_timer = true;
        return;
    }

    if (ent->retries_left == 0) {
        /* The message has used all its retries. */
        ent->inuse = 0;
        smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
        msg = ent->recv_msg;
        list_add_tail(&msg->link, timeouts);
        if (ent->broadcast)
            ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
        else if (is_lan_addr(&ent->recv_msg->addr))
            ipmi_inc_stat(intf, timed_out_lan_commands);
        else
            ipmi_inc_stat(intf, timed_out_ipmb_commands);
    } else {
        struct ipmi_smi_msg *smi_msg;

        /* More retries, send again. */
        *need_timer = true;

        /*
         * Start with the max timer, set to normal timer after
         * the message is sent.
         */
        ent->timeout = MAX_MSG_TIMEOUT;
        ent->retries_left--;
        smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
                                    ent->seqid);
        if (!smi_msg) {
            if (is_lan_addr(&ent->recv_msg->addr))
                ipmi_inc_stat(intf,
                              dropped_rexmit_lan_commands);
            else
                ipmi_inc_stat(intf,
                              dropped_rexmit_ipmb_commands);
            return;
        }

        spin_unlock_irqrestore(&intf->seq_lock, *flags);

        /*
         * Send the new message.  We send with a zero
         * priority.  It timed out, I doubt time is that
         * critical now, and high priority messages are really
         * only for messages to the local MC, which don't get
         * resent.
         */
        if (intf->handlers) {
            if (is_lan_addr(&ent->recv_msg->addr))
                ipmi_inc_stat(intf,
                              retransmitted_lan_commands);
            else
                ipmi_inc_stat(intf,
                              retransmitted_ipmb_commands);

            smi_send(intf, intf->handlers, smi_msg, 0);
        } else
            ipmi_free_smi_msg(smi_msg);

        spin_lock_irqsave(&intf->seq_lock, *flags);
    }
}
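/*
 * On the retransmit path above, seq_lock is dropped around smi_send()
 * because sending can call back into this layer and must not happen
 * under the sequence-table lock; the caller's flags pointer is what
 * allows the lock to be released and retaken in place while the
 * caller's loop over the table continues.
 */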
static bool ipmi_timeout_handler(struct ipmi_smi *intf,
                                 unsigned long timeout_period)
{
    struct list_head timeouts;
    struct ipmi_recv_msg *msg, *msg2;
    unsigned long flags;
    int i;
    bool need_timer = false;

    if (!intf->bmc_registered) {
        kref_get(&intf->refcount);
        if (!schedule_work(&intf->bmc_reg_work)) {
            kref_put(&intf->refcount, intf_free);
            need_timer = true;
        }
    }

    /*
     * Go through the seq table and find any messages that
     * have timed out, putting them in the timeouts
     * list.
     */
    INIT_LIST_HEAD(&timeouts);
    spin_lock_irqsave(&intf->seq_lock, flags);
    if (intf->ipmb_maintenance_mode_timeout) {
        if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
            intf->ipmb_maintenance_mode_timeout = 0;
        else
            intf->ipmb_maintenance_mode_timeout -= timeout_period;
    }
    for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
        check_msg_timeout(intf, &intf->seq_table[i],
                          &timeouts, timeout_period, i,
                          &flags, &need_timer);
    spin_unlock_irqrestore(&intf->seq_lock, flags);

    list_for_each_entry_safe(msg, msg2, &timeouts, link)
        deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);

    /*
     * Maintenance mode handling.  Check the timeout
     * optimistically before we claim the lock.  It may
     * mean a timeout gets missed occasionally, but that
     * only means the timeout gets extended by one period
     * in that case.  No big deal, and it avoids the lock
     * most of the time.
     */
    if (intf->auto_maintenance_timeout > 0) {
        spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
        if (intf->auto_maintenance_timeout > 0) {
            intf->auto_maintenance_timeout
                -= timeout_period;
            if (!intf->maintenance_mode
                && (intf->auto_maintenance_timeout <= 0)) {
                intf->maintenance_mode_enable = false;
                maintenance_mode_update(intf);
            }
        }
        spin_unlock_irqrestore(&intf->maintenance_mode_lock,
                               flags);
    }

    tasklet_schedule(&intf->recv_tasklet);

    return need_timer;
}
static void ipmi_request_event(struct ipmi_smi *intf)
{
    /* No event requests when in maintenance mode. */
    if (intf->maintenance_mode_enable)
        return;

    if (!intf->in_shutdown)
        intf->handlers->request_events(intf->send_info);
}
static struct timer_list ipmi_timer;

static atomic_t stop_operation;
static void ipmi_timeout(struct timer_list *unused)
{
    struct ipmi_smi *intf;
    bool need_timer = false;
    int index;

    if (atomic_read(&stop_operation))
        return;

    index = srcu_read_lock(&ipmi_interfaces_srcu);
    list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
        if (atomic_read(&intf->event_waiters)) {
            intf->ticks_to_req_ev--;
            if (intf->ticks_to_req_ev == 0) {
                ipmi_request_event(intf);
                intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
            }
            need_timer = true;
        }

        need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
    }
    srcu_read_unlock(&ipmi_interfaces_srcu, index);

    if (need_timer)
        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
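/*
 * The periodic timer is self-arming only while there is outstanding
 * work: ipmi_timeout() above rearms it only when some interface still
 * needs timing services, and need_waiter() below restarts it when a
 * new waiter shows up, so an idle system stops ticking entirely.
 */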
static void need_waiter(struct ipmi_smi *intf)
{
    /* Racy, but worst case we start the timer twice. */
    if (!timer_pending(&ipmi_timer))
        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
    atomic_dec(&smi_msg_inuse_count);
    kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
    struct ipmi_smi_msg *rv;

    rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
    if (rv) {
        rv->done = free_smi_msg;
        rv->user_data = NULL;
        atomic_inc(&smi_msg_inuse_count);
    }
    return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
    atomic_dec(&recv_msg_inuse_count);
    kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
    struct ipmi_recv_msg *rv;

    rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
    if (rv) {
        rv->user = NULL;
        rv->done = free_recv_msg;
        atomic_inc(&recv_msg_inuse_count);
    }
    return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
    if (msg->user)
        kref_put(&msg->user->refcount, free_user);
    msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
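/*
 * Receive-side ownership sketch (the handler below is hypothetical;
 * the real contract is only that a client's receive handler owns the
 * delivered message and must release it with ipmi_free_recv_msg()):
 *
 *      static void my_recv_handler(struct ipmi_recv_msg *msg,
 *                                  void *handler_data)
 *      {
 *              pr_info("netfn %x cmd %x\n",
 *                      msg->msg.netfn, msg->msg.cmd);
 *              ipmi_free_recv_msg(msg);
 *      }
 *
 * The inuse counters above exist so cleanup_ipmi() can warn about
 * messages that were allocated but never released.
 */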
static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
    atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
    atomic_dec(&panic_done_count);
}
/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
                                        struct ipmi_addr *addr,
                                        struct kernel_ipmi_msg *msg)
{
    struct ipmi_smi_msg smi_msg;
    struct ipmi_recv_msg recv_msg;
    int rv;

    smi_msg.done = dummy_smi_done_handler;
    recv_msg.done = dummy_recv_done_handler;
    atomic_add(2, &panic_done_count);
    rv = i_ipmi_request(NULL,
                        intf,
                        addr,
                        0,
                        msg,
                        intf,
                        &smi_msg,
                        &recv_msg,
                        0,
                        intf->addrinfo[0].address,
                        intf->addrinfo[0].lun,
                        0, 1); /* Don't retry, and don't wait. */
    if (rv)
        atomic_sub(2, &panic_done_count);
    else if (intf->handlers->flush_messages)
        intf->handlers->flush_messages(intf->send_info);

    while (atomic_read(&panic_done_count) != 0)
        ipmi_poll(intf);
}
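/*
 * Accounting note for the function above: panic_done_count is raised
 * by two because the SMI message and the receive message each have a
 * dummy done handler that decrements it once.  Busy-polling with
 * ipmi_poll() is the only option here, since interrupts and the
 * scheduler cannot be relied on during a panic.
 */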
static void event_receiver_fetcher(struct ipmi_smi *intf,
                                   struct ipmi_recv_msg *msg)
{
    if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
        && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
        && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
        && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
        /* A get event receiver command, save it. */
        intf->event_receiver = msg->msg.data[1];
        intf->event_receiver_lun = msg->msg.data[2] & 0x3;
    }
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
    if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
        && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
        && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
        && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
        /*
         * A get device id command, save whether we are an event
         * receiver or generator.
         */
        intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
        intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
    }
}
static void send_panic_events(struct ipmi_smi *intf, char *str)
{
    struct kernel_ipmi_msg msg;
    unsigned char data[16];
    struct ipmi_system_interface_addr *si;
    struct ipmi_addr addr;
    char *p = str;
    struct ipmi_ipmb_addr *ipmb;
    int j;

    if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
        return;

    si = (struct ipmi_system_interface_addr *) &addr;
    si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
    si->channel = IPMI_BMC_CHANNEL;
    si->lun = 0;

    /* Fill in an event telling that we have failed. */
    msg.netfn = 0x04; /* Sensor or Event. */
    msg.cmd = 2; /* Platform event command. */
    msg.data = data;
    msg.data_len = 8;
    data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
    data[1] = 0x03; /* This is for IPMI 1.0. */
    data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
    data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
    data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

    /*
     * Put a few breadcrumbs in.  Hopefully later we can add more things
     * to make the panic events more useful.
     */
    if (str) {
        data[3] = str[0];
        data[6] = str[1];
        data[7] = str[2];
    }

    /* Send the event announcing the panic. */
    ipmi_panic_request_and_wait(intf, &addr, &msg);

    /*
     * On every interface, dump a bunch of OEM events holding the
     * panic string.
     */
    if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
        return;

    /*
     * intf_num is used as a marker to tell if the
     * interface is valid.  Thus we need a read barrier to
     * make sure data fetched before checking intf_num
     * won't be used.
     */
    smp_rmb();

    /*
     * First job here is to figure out where to send the
     * OEM events.  There's no way in IPMI to send OEM
     * events using an event send command, so we have to
     * find the SEL to put them in and stick them in
     * there.
     */

    /* Get capabilities from the get device id. */
    intf->local_sel_device = 0;
    intf->local_event_generator = 0;
    intf->event_receiver = 0;

    /* Request the device info from the local MC. */
    msg.netfn = IPMI_NETFN_APP_REQUEST;
    msg.cmd = IPMI_GET_DEVICE_ID_CMD;
    msg.data = NULL;
    msg.data_len = 0;
    intf->null_user_handler = device_id_fetcher;
    ipmi_panic_request_and_wait(intf, &addr, &msg);

    if (intf->local_event_generator) {
        /* Request the event receiver from the local MC. */
        msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
        msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
        msg.data = NULL;
        msg.data_len = 0;
        intf->null_user_handler = event_receiver_fetcher;
        ipmi_panic_request_and_wait(intf, &addr, &msg);
    }
    intf->null_user_handler = NULL;

    /*
     * Validate the event receiver.  The low bit must not
     * be 1 (it must be a valid IPMB address), it cannot
     * be zero, and it must not be my address.
     */
    if (((intf->event_receiver & 1) == 0)
        && (intf->event_receiver != 0)
        && (intf->event_receiver != intf->addrinfo[0].address)) {
        /*
         * The event receiver is valid, send an IPMB
         * message.
         */
        ipmb = (struct ipmi_ipmb_addr *) &addr;
        ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
        ipmb->channel = 0; /* FIXME - is this right? */
        ipmb->lun = intf->event_receiver_lun;
        ipmb->slave_addr = intf->event_receiver;
    } else if (intf->local_sel_device) {
        /*
         * The event receiver was not valid (or was
         * me), but I am an SEL device, just dump it
         * in my SEL.
         */
        si = (struct ipmi_system_interface_addr *) &addr;
        si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
        si->channel = IPMI_BMC_CHANNEL;
        si->lun = 0;
    } else
        return; /* Nowhere to send the event. */

    msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
    msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
    msg.data = data;
    msg.data_len = 16;

    j = 0;
    while (*p) {
        int size = strlen(p);

        if (size > 11)
            size = 11;
        data[0] = 0;
        data[1] = 0;
        data[2] = 0xf0; /* OEM event without timestamp. */
        data[3] = intf->addrinfo[0].address;
        data[4] = j++; /* sequence # */
        /*
         * Always give 11 bytes, so strncpy will fill
         * it with zeroes for me.
         */
        strncpy(data + 5, p, 11);
        p += size;

        ipmi_panic_request_and_wait(intf, &addr, &msg);
    }
}
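/*
 * Worked example of the chunking above: each SEL record is 16 bytes,
 * 5 of which are header (record type 0xf0, our slave address, and a
 * sequence number), leaving 11 bytes of string per record.  A panic
 * string such as "Fatal exception" (15 characters) would therefore go
 * out as two OEM records: "Fatal excep" with sequence 0 and "tion"
 * (zero-padded by the strncpy) with sequence 1.
 */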
static int has_panicked;

static int panic_event(struct notifier_block *this,
                       unsigned long event,
                       void *ptr)
{
    struct ipmi_smi *intf;
    struct ipmi_user *user;

    if (has_panicked)
        return NOTIFY_DONE;
    has_panicked = 1;

    /* For every registered interface, set it to run to completion. */
    list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
        if (!intf->handlers || intf->intf_num == -1)
            /* Interface is not ready. */
            continue;

        if (!intf->handlers->poll)
            continue;

        /*
         * If we were interrupted while locking xmit_msgs_lock or
         * waiting_rcv_msgs_lock, the corresponding list may be
         * corrupted.  In this case, drop items on the list for
         * safety.
         */
        if (!spin_trylock(&intf->xmit_msgs_lock)) {
            INIT_LIST_HEAD(&intf->xmit_msgs);
            INIT_LIST_HEAD(&intf->hp_xmit_msgs);
        } else
            spin_unlock(&intf->xmit_msgs_lock);

        if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
            INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
        else
            spin_unlock(&intf->waiting_rcv_msgs_lock);

        intf->run_to_completion = 1;
        if (intf->handlers->set_run_to_completion)
            intf->handlers->set_run_to_completion(intf->send_info,
                                                  1);

        list_for_each_entry_rcu(user, &intf->users, link) {
            if (user->handler->ipmi_panic_handler)
                user->handler->ipmi_panic_handler(
                    user->handler_data);
        }

        send_panic_events(intf, ptr);
    }

    return NOTIFY_DONE;
}
/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
    int rv;

    if (drvregistered)
        return 0;

    rv = driver_register(&ipmidriver.driver);
    if (rv)
        pr_err("Could not register IPMI driver\n");
    else
        drvregistered = true;
    return rv;
}
static struct notifier_block panic_block = {
    .notifier_call = panic_event,
    .next = NULL,
    .priority = 200 /* priority: INT_MAX >= x >= 0 */
};
static int ipmi_init_msghandler(void)
{
    int rv;

    mutex_lock(&ipmi_interfaces_mutex);
    rv = ipmi_register_driver();
    if (rv)
        goto out;
    if (initialized)
        goto out;

    init_srcu_struct(&ipmi_interfaces_srcu);

    timer_setup(&ipmi_timer, ipmi_timeout, 0);
    mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

    atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

    initialized = true;

out:
    mutex_unlock(&ipmi_interfaces_mutex);
    return rv;
}
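/*
 * Note that ipmi_init_msghandler() runs both from module init below
 * and on demand when the first interface registers earlier in this
 * file; the initialized flag, taken under ipmi_interfaces_mutex,
 * keeps the one-time setup (SRCU state, timer, panic notifier) from
 * running twice.
 */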
static int __init ipmi_init_msghandler_mod(void)
{
    int rv;

    pr_info("version " IPMI_DRIVER_VERSION "\n");

    mutex_lock(&ipmi_interfaces_mutex);
    rv = ipmi_register_driver();
    mutex_unlock(&ipmi_interfaces_mutex);

    return rv;
}
static void __exit cleanup_ipmi(void)
{
    int count;

    if (initialized) {
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &panic_block);

        /*
         * This can't be called if any interfaces exist, so no worry
         * about shutting down the interfaces.
         */

        /*
         * Tell the timer to stop, then wait for it to stop.  This
         * avoids problems with race conditions removing the timer
         * here.
         */
        atomic_set(&stop_operation, 1);
        del_timer_sync(&ipmi_timer);

        initialized = false;

        /* Check for buffer leaks. */
        count = atomic_read(&smi_msg_inuse_count);
        if (count != 0)
            pr_warn("SMI message count %d at exit\n", count);
        count = atomic_read(&recv_msg_inuse_count);
        if (count != 0)
            pr_warn("recv message count %d at exit\n", count);

        cleanup_srcu_struct(&ipmi_interfaces_srcu);
    }
    if (drvregistered)
        driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");