// SPDX-License-Identifier: GPL-2.0+
/*
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(unsigned long);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

static bool initialized;
static bool drvregistered;

enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING
};

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	char *s;

	strncpy(valcp, val, 15);
	valcp[15] = '\0';

	s = strstrip(valcp);

	if (strcmp(s, "none") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
	else if (strcmp(s, "event") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
	else if (strcmp(s, "string") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
	else
		return -EINVAL;

	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	switch (ipmi_send_panic_event) {
	case IPMI_SEND_PANIC_EVENT_NONE:
		strcpy(buffer, "none");
		break;

	case IPMI_SEND_PANIC_EVENT:
		strcpy(buffer, "event");
		break;

	case IPMI_SEND_PANIC_EVENT_STRING:
		strcpy(buffer, "string");
		break;

	default:
		strcpy(buffer, "???");
		break;
	}

	return strlen(buffer);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");

#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at least
 * the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The number of retries for a message before it is given up on");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void             *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}

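/*
 * Sketch of the usage pattern seen throughout this file (illustrative
 * comment only): every external entry point brackets its use of a user
 * with the SRCU read lock taken in acquire_ipmi_user():
 *
 *	int index;
 *
 *	user = acquire_ipmi_user(user, &index);
 *	if (!user)
 *		return -ENODEV;	// user was destroyed concurrently
 *	// ... use user ...
 *	release_ipmi_user(user, index);
 */
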
struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3ffffff);			\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)

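/*
 * Illustrative example of the round trip (comment only): with the 6-bit
 * sequence number and the 26-bit generation counter above,
 *
 *	long msgid = STORE_SEQ_IN_MSGID(5, 12345);
 *	unsigned char seq;
 *	unsigned long seqid;
 *
 *	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
 *	// seq == 5, seqid == 12345
 *
 * NEXT_SEQID() wraps the generation counter at 2^26, so a late response
 * whose seqid no longer matches the table entry is rejected.
 */
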
#define IPMI_MAX_CHANNELS       16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head       intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id  id;
	struct ipmi_device_id  fetch_id;
	int                    dyn_id_set;
	unsigned long          dyn_id_expiry;
	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t                 guid;
	guid_t                 fetch_guid;
	int                    dyn_guid_set;
	struct kref	       usecount;
	struct work_struct     remove_work;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out to the LAN interface. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent to the LAN interface. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};

#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void                           *send_info;

	/* Driver-model device for the system interface. */
	struct device          *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t       waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t         watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t             xmit_msgs_lock;
	struct list_head       xmit_msgs;
	struct ipmi_smi_msg    *curr_msg;
	struct list_head       hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	char             delivering_events;
	char             event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t         event_waiters;
	unsigned int     ticks_to_req_ev;

	spinlock_t       watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int     command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int     watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int     response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int     last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures.  Used to decrease numbers of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);

/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
#define ipmi_interfaces_mutex_held() \
	lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

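/*
 * Usage note (illustrative): the token-pasting macros above expand the
 * short stat name to the enum value, e.g.
 *
 *	ipmi_inc_stat(intf, sent_ipmb_commands);
 *	// -> atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands])
 */
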
static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int              i;
	struct cmd_rcvr  *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
					&& (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int              intf_num;
	struct ipmi_smi  *intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		int intf_num = READ_ONCE(intf->intf_num);

		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	mutex_unlock(&smi_watchers_mutex);

	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

/*
 * Call the watchers for a new interface.  Takes and releases
 * smi_watchers_mutex itself, so do not call this with it held.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
	mutex_unlock(&smi_watchers_mutex);
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
		    = (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

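/*
 * Illustrative example (comment only; it mirrors what
 * send_get_device_id_cmd() does later in this file): building and
 * validating a system-interface address.
 *
 *	struct ipmi_system_interface_addr si;
 *
 *	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *	si.channel = IPMI_BMC_CHANNEL;
 *	si.lun = 0;
 *	// ipmi_validate_addr((struct ipmi_addr *) &si, sizeof(si)) == 0
 */
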
static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk.  At this moment, simply skip it in that
		 * case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

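/*
 * Note on the "netfn |= 1" above: per the IPMI spec, request netfns are
 * even and the matching response is the request netfn with the low bit
 * set, e.g. App request 0x06 pairs with App response 0x07.
 */
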
static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

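/*
 * Note (illustrative): smi_add_watch() and smi_remove_watch() must be
 * called in balanced pairs with the same flags; the per-flag counters
 * above are what keep the lower layer watching while at least one user
 * still needs messages, watchdog pretimeouts, or commands.
 */
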
/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi      *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int          rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against message coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi      *intf,
			 unsigned char        seq,
			 short                channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int           rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
				&& (msg->msg.netfn == netfn)
				&& (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long            msgid)
{
	int           rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
				&& (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long            msgid,
			unsigned int    err)
{
	int                  rv = -ENODEV;
	unsigned long        flags;
	unsigned char        seq;
	unsigned long        seqid;
	struct ipmi_recv_msg *msg = NULL;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
				&& (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	vfree(user);
}

int ipmi_create_user(unsigned int          if_num,
		     const struct ipmi_user_hndl *handler,
		     void                  *handler_data,
		     struct ipmi_user      **user)
{
	unsigned long flags;
	struct ipmi_user *new_user;
	int           rv, index;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user)
		return -ENOMEM;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	INIT_WORK(&new_user->remove_work, free_user_work);

	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	rcu_assign_pointer(new_user->self, new_user);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;

out_kfree:
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	vfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);

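/*
 * Sketch of a minimal in-kernel client of this API (illustrative only;
 * my_recv and my_hndl are hypothetical names).  The receive handler is
 * expected to free the message when done with it; deliver_response()
 * above does not free on successful delivery.
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		// ... inspect msg->msg / msg->addr ...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user); // if 0 exists
 *	// ... send requests ...
 *	if (!rv)
 *		ipmi_destroy_user(user);
 */
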
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv, index;
	struct ipmi_smi *intf;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	/* Not found, return an error */
	return -EINVAL;

found:
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

static void free_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	/* SRCU cleanup must happen in task context. */
	schedule_work(&user->remove_work);
}

static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi  *intf = user->intf;
	int              i;
	unsigned long    flags;
	struct cmd_rcvr  *rcvr;
	struct cmd_rcvr  *rcvrs = NULL;

	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_srcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	kref_put(&intf->refcount, intf_free);
	module_put(intf->owner);
}

int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int  channel,
			unsigned char address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int  channel,
			unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int  channel,
		    unsigned char LUN)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int  channel,
		    unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode, index;
	unsigned long flags;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0, index;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	unsigned long        flags;
	struct ipmi_smi      *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head     msgs;
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);
	release_ipmi_user(user, index);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int  chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

 out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
 out_release:
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}

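/*
 * Property used by the framing code below (the IPMI 2's-complement
 * checksum): summing the covered bytes plus the checksum byte yields 0
 * modulo 256.  For example, for {0x20, 0x06} the checksum is 0xda, and
 * 0x20 + 0x06 + 0xda == 0x100.
 */
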
static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long                  msgid,
				   unsigned char         ipmb_seq,
				   int                   broadcast,
				   unsigned char         source_address,
				   unsigned char         source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}

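/*
 * Resulting layout of the encapsulated IPMB request built above
 * (illustrative; i is 1 only for broadcasts, which prepend a zero):
 *
 *	data[0]    netfn/LUN of the Send Message command
 *	data[1]    IPMI_SEND_MSG_CMD
 *	data[2]    channel
 *	data[i+3]  rsSA  (responder slave address)
 *	data[i+4]  netfn << 2 | rsLUN
 *	data[i+5]  checksum over rsSA and netfn/rsLUN
 *	data[i+6]  rqSA  (requester/source address)
 *	data[i+7]  rqSeq << 2 | rqLUN
 *	data[i+8]  cmd
 *	...        payload, then a trailing checksum byte
 */
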
static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr  *lan_addr,
				  long                  msgid,
				  unsigned char         ipmb_seq,
				  unsigned char         source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

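/*
 * Note (assumption based on how run_to_completion is used elsewhere in
 * the IPMI code): it is set when the system is effectively
 * single-threaded, e.g. while panicking, so skipping xmit_msgs_lock in
 * that case cannot race with other senders.
 */
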
static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
			      struct ipmi_addr       *addr,
			      long                   msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg    *smi_msg,
			      struct ipmi_recv_msg   *recv_msg,
			      int                    retries,
			      unsigned int           retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}

static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
			   struct ipmi_addr       *addr,
			   long                   msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg    *smi_msg,
			   struct ipmi_recv_msg   *recv_msg,
			   unsigned char          source_address,
			   unsigned char          source_lun,
			   int                    retries,
			   unsigned int           retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but otherwise is the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}

static int i_ipmi_req_lan(struct ipmi_smi        *intf,
			  struct ipmi_addr       *addr,
			  long                   msgid,
			  struct kernel_ipmi_msg *msg,
			  struct ipmi_smi_msg    *smi_msg,
			  struct ipmi_recv_msg   *recv_msg,
			  unsigned char          source_lun,
			  int                    retries,
			  unsigned int           retry_time_ms)
{
	struct ipmi_lan_addr  *lan_addr;
	unsigned char ipmb_seq;
	long seqid;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if ((chans[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_8023LAN)
			&& (chans[addr->channel].medium
			    != IPMI_CHANNEL_MEDIUM_ASYNC)) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	/* 11 for the header and 1 for the checksum. */
	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	lan_addr = (struct ipmi_lan_addr *) addr;
	if (lan_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_lan_responses);
		format_lan_msg(smi_msg, msg, lan_addr, msgid,
			       msgid, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   0,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_lan_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_lan_msg(smi_msg, msg, lan_addr,
			       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
			       ipmb_seq, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}

/*
 * Separate from ipmi_request so that the user does not have to be
 * supplied in certain circumstances (mainly at panic time).  If
 * messages are supplied, they will be freed, even if an error
 * occurs.
 */
static int i_ipmi_request(struct ipmi_user     *user,
			  struct ipmi_smi      *intf,
			  struct ipmi_addr     *addr,
			  long                 msgid,
			  struct kernel_ipmi_msg *msg,
			  void                 *user_msg_data,
			  void                 *supplied_smi,
			  struct ipmi_recv_msg *supplied_recv,
			  int                  priority,
			  unsigned char        source_address,
			  unsigned char        source_lun,
			  int                  retries,
			  unsigned int         retry_time_ms)
{
	struct ipmi_smi_msg *smi_msg;
	struct ipmi_recv_msg *recv_msg;
	int rv = 0;

	if (supplied_recv)
		recv_msg = supplied_recv;
	else {
		recv_msg = ipmi_alloc_recv_msg();
		if (recv_msg == NULL) {
			rv = -ENOMEM;
			goto out;
		}
	}
	recv_msg->user_msg_data = user_msg_data;

	if (supplied_smi)
		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
	else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
			if (!supplied_recv)
				ipmi_free_recv_msg(recv_msg);
			rv = -ENOMEM;
			goto out;
		}
	}

	rcu_read_lock();
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_err;
	}

	recv_msg->user = user;
	if (user)
		/* The put happens when the message is freed. */
		kref_get(&user->refcount);
	recv_msg->msgid = msgid;
	/*
	 * Store the message to send in the receive message so timeout
	 * responses can get the proper response data.
	 */
	recv_msg->msg = *msg;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
					recv_msg, retries, retry_time_ms);
	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
				     source_address, source_lun,
				     retries, retry_time_ms);
	} else if (is_lan_addr(addr)) {
		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
				    source_lun, retries, retry_time_ms);
	} else {
		/* Unknown address type. */
		ipmi_inc_stat(intf, sent_invalid_commands);
		rv = -EINVAL;
	}

	if (rv) {
out_err:
		ipmi_free_smi_msg(smi_msg);
		ipmi_free_recv_msg(recv_msg);
	} else {
		pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);

		smi_send(intf, intf->handlers, smi_msg, priority);
	}
	rcu_read_unlock();

out:
	return rv;
}

static int check_addr(struct ipmi_smi  *intf,
		      struct ipmi_addr *addr,
		      unsigned char    *saddr,
		      unsigned char    *lun)
{
	if (addr->channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
	*lun = intf->addrinfo[addr->channel].lun;
	*saddr = intf->addrinfo[addr->channel].address;
	return 0;
}

int ipmi_request_settime(struct ipmi_user *user,
			 struct ipmi_addr *addr,
			 long             msgid,
			 struct kernel_ipmi_msg *msg,
			 void             *user_msg_data,
			 int              priority,
			 int              retries,
			 unsigned int     retry_time_ms)
{
	unsigned char saddr = 0, lun = 0;
	int rv, index;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user,
				    user->intf,
				    addr,
				    msgid,
				    msg,
				    user_msg_data,
				    NULL, NULL,
				    priority,
				    saddr,
				    lun,
				    retries,
				    retry_time_ms);

	release_ipmi_user(user, index);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);

int ipmi_request_supply_msgs(struct ipmi_user     *user,
			     struct ipmi_addr     *addr,
			     long                 msgid,
			     struct kernel_ipmi_msg *msg,
			     void                 *user_msg_data,
			     void                 *supplied_smi,
			     struct ipmi_recv_msg *supplied_recv,
			     int                  priority)
{
	unsigned char saddr = 0, lun = 0;
	int rv, index;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user,
				    user->intf,
				    addr,
				    msgid,
				    msg,
				    user_msg_data,
				    supplied_smi,
				    supplied_recv,
				    priority,
				    saddr,
				    lun,
				    -1, 0);

	release_ipmi_user(user, index);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);

static void bmc_device_id_handler(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg)
{
	int rv;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
			|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
			|| (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
		dev_warn(intf->si_dev,
			 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
		return;
	}

	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
	if (rv) {
		dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
		intf->bmc->dyn_id_set = 0;
	} else {
		/*
		 * Make sure the id data is available before setting
		 * dyn_id_set.
		 */
		smp_wmb();
		intf->bmc->dyn_id_set = 1;
	}

	wake_up(&intf->waitq);
}

static int
send_get_device_id_cmd(struct ipmi_smi *intf)
{
	struct ipmi_system_interface_addr si;
	struct kernel_ipmi_msg msg;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;

	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}
static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
{
	int rv;

	bmc->dyn_id_set = 2;

	intf->null_user_handler = bmc_device_id_handler;

	rv = send_get_device_id_cmd(intf);
	if (rv)
		return rv;

	wait_event(intf->waitq, bmc->dyn_id_set != 2);

	if (!bmc->dyn_id_set)
		rv = -EIO; /* Something went wrong in the fetch. */

	/* dyn_id_set makes the id data available. */
	smp_rmb();

	intf->null_user_handler = NULL;

	return rv;
}
/*
 * Fetch the device id for the bmc/interface.  You must pass in either
 * bmc or intf, this code will get the other one.  If the data has
 * been recently fetched, this will just use the cached data.  Otherwise
 * it will run a new fetch.
 *
 * Except for the first time this is called (in ipmi_add_smi()),
 * this will always return good data.
 */
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			       struct ipmi_device_id *id,
			       bool *guid_set, guid_t *guid, int intf_num)
{
	int rv = 0;
	int prev_dyn_id_set, prev_guid_set;
	bool intf_set = intf != NULL;

	if (!intf) {
		mutex_lock(&bmc->dyn_mutex);
retry_bmc_lock:
		if (list_empty(&bmc->intfs)) {
			mutex_unlock(&bmc->dyn_mutex);
			return -ENOENT;
		}
		intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
					bmc_link);
		kref_get(&intf->refcount);
		mutex_unlock(&bmc->dyn_mutex);
		mutex_lock(&intf->bmc_reg_mutex);
		mutex_lock(&bmc->dyn_mutex);
		if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
					     bmc_link)) {
			mutex_unlock(&intf->bmc_reg_mutex);
			kref_put(&intf->refcount, intf_free);
			goto retry_bmc_lock;
		}
	} else {
		mutex_lock(&intf->bmc_reg_mutex);
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		kref_get(&intf->refcount);
	}

	/* If we have a valid and current ID, just return that. */
	if (intf->in_bmc_register ||
	    (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
		goto out_noprocessing;

	prev_guid_set = bmc->dyn_guid_set;
	__get_guid(intf);

	prev_dyn_id_set = bmc->dyn_id_set;
	rv = __get_device_id(intf, bmc);
	if (rv)
		goto out;

	/*
	 * The guid, device id, manufacturer id, and product id should
	 * not change on a BMC.  If it does we have to do some dancing.
	 */
	if (!intf->bmc_registered
	    || (!prev_guid_set && bmc->dyn_guid_set)
	    || (!prev_dyn_id_set && bmc->dyn_id_set)
	    || (prev_guid_set && bmc->dyn_guid_set
		&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
	    || bmc->id.device_id != bmc->fetch_id.device_id
	    || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
	    || bmc->id.product_id != bmc->fetch_id.product_id) {
		struct ipmi_device_id id = bmc->fetch_id;
		int guid_set = bmc->dyn_guid_set;
		guid_t guid;

		guid = bmc->fetch_guid;
		mutex_unlock(&bmc->dyn_mutex);

		__ipmi_bmc_unregister(intf);
		/* Fill in the temporary BMC for good measure. */
		intf->bmc->id = id;
		intf->bmc->dyn_guid_set = guid_set;
		intf->bmc->guid = guid;
		if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
			need_waiter(intf); /* Retry later on an error. */
		else
			__scan_channels(intf, &id);

		if (!intf_set) {
			/*
			 * We weren't given the interface on the
			 * command line, so restart the operation on
			 * the next interface for the BMC.
			 */
			mutex_unlock(&intf->bmc_reg_mutex);
			mutex_lock(&bmc->dyn_mutex);
			goto retry_bmc_lock;
		}

		/* We have a new BMC, set it up. */
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		goto out_noprocessing;
	} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
		/* Version info changes, scan the channels again. */
		__scan_channels(intf, &bmc->fetch_id);

	bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

out:
	if (rv && prev_dyn_id_set) {
		rv = 0; /* Ignore failures if we have previous data. */
		bmc->dyn_id_set = prev_dyn_id_set;
	}
	if (!rv) {
		bmc->id = bmc->fetch_id;
		if (bmc->dyn_guid_set)
			bmc->guid = bmc->fetch_guid;
		else if (prev_guid_set)
			/*
			 * The guid used to be valid and it failed to fetch,
			 * just use the cached value.
			 */
			bmc->dyn_guid_set = prev_guid_set;
	}
out_noprocessing:
	if (!rv) {
		if (id)
			*id = bmc->id;

		if (guid_set)
			*guid_set = bmc->dyn_guid_set;

		if (guid && bmc->dyn_guid_set)
			*guid = bmc->guid;
	}

	mutex_unlock(&bmc->dyn_mutex);
	mutex_unlock(&intf->bmc_reg_mutex);

	kref_put(&intf->refcount, intf_free);
	return rv;
}
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid)
{
	return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
}
static ssize_t
device_id_show(struct device *dev,
	       struct device_attribute *attr,
	       char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "%u\n", id.device_id);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t
provides_device_sdrs_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR_RO(provides_device_sdrs);

static ssize_t
revision_show(struct device *dev, struct device_attribute *attr,
	      char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR_RO(revision);

static ssize_t
firmware_revision_show(struct device *dev,
		       struct device_attribute *attr,
		       char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
			id.firmware_revision_2);
}
static DEVICE_ATTR_RO(firmware_revision);

static ssize_t
ipmi_version_show(struct device *dev,
		  struct device_attribute *attr,
		  char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "%u.%u\n",
			ipmi_version_major(&id),
			ipmi_version_minor(&id));
}
static DEVICE_ATTR_RO(ipmi_version);

static ssize_t
add_dev_support_show(struct device *dev,
		     struct device_attribute *attr,
		     char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
		   NULL);

static ssize_t
manufacturer_id_show(struct device *dev,
		     struct device_attribute *attr,
		     char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR_RO(manufacturer_id);

static ssize_t
product_id_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR_RO(product_id);

static ssize_t
aux_firmware_rev_show(struct device *dev,
		      struct device_attribute *attr,
		      char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
			id.aux_firmware_revision[3],
			id.aux_firmware_revision[2],
			id.aux_firmware_revision[1],
			id.aux_firmware_revision[0]);
}
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);

static ssize_t
guid_show(struct device *dev, struct device_attribute *attr,
	  char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	bool guid_set;
	guid_t guid;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
	if (rv)
		return rv;
	if (!guid_set)
		return -ENOENT;

	return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
}
static DEVICE_ATTR_RO(guid);
static struct attribute *bmc_dev_attrs[] = {
	&dev_attr_device_id.attr,
	&dev_attr_provides_device_sdrs.attr,
	&dev_attr_revision.attr,
	&dev_attr_firmware_revision.attr,
	&dev_attr_ipmi_version.attr,
	&dev_attr_additional_device_support.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_aux_firmware_revision.attr,
	&dev_attr_guid.attr,
	NULL
};
static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct bmc_device *bmc = to_bmc_device(dev);
	umode_t mode = attr->mode;
	int rv;

	if (attr == &dev_attr_aux_firmware_revision.attr) {
		struct ipmi_device_id id;

		rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
		return (!rv && id.aux_firmware_revision_set) ? mode : 0;
	}
	if (attr == &dev_attr_guid.attr) {
		bool guid_set;

		rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
		return (!rv && guid_set) ? mode : 0;
	}
	return mode;
}
static const struct attribute_group bmc_dev_attr_group = {
	.attrs		= bmc_dev_attrs,
	.is_visible	= bmc_dev_attr_is_visible,
};

static const struct attribute_group *bmc_dev_attr_groups[] = {
	&bmc_dev_attr_group,
	NULL
};

static const struct device_type bmc_device_type = {
	.groups		= bmc_dev_attr_groups,
};
static int __find_bmc_guid(struct device *dev, const void *data)
{
	const guid_t *guid = data;
	struct bmc_device *bmc;
	int rv;

	if (dev->type != &bmc_device_type)
		return 0;

	bmc = to_bmc_device(dev);
	rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
	if (rv)
		rv = kref_get_unless_zero(&bmc->usecount);
	return rv;
}

/*
 * Returns with the bmc's usecount incremented, if it is non-NULL.
 */
static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
					     guid_t *guid)
{
	struct device *dev;
	struct bmc_device *bmc = NULL;

	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
	if (dev) {
		bmc = to_bmc_device(dev);
		put_device(dev);
	}
	return bmc;
}
struct prod_dev_id {
	unsigned int  product_id;
	unsigned char device_id;
};

static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
{
	const struct prod_dev_id *cid = data;
	struct bmc_device *bmc;
	int rv;

	if (dev->type != &bmc_device_type)
		return 0;

	bmc = to_bmc_device(dev);
	rv = (bmc->id.product_id == cid->product_id
	      && bmc->id.device_id == cid->device_id);
	if (rv)
		rv = kref_get_unless_zero(&bmc->usecount);
	return rv;
}

/*
 * Returns with the bmc's usecount incremented, if it is non-NULL.
 */
static struct bmc_device *ipmi_find_bmc_prod_dev_id(
	struct device_driver *drv,
	unsigned int product_id, unsigned char device_id)
{
	struct prod_dev_id id = {
		.product_id = product_id,
		.device_id = device_id,
	};
	struct device *dev;
	struct bmc_device *bmc = NULL;

	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
	if (dev) {
		bmc = to_bmc_device(dev);
		put_device(dev);
	}
	return bmc;
}
static DEFINE_IDA(ipmi_bmc_ida);

static void
release_bmc_device(struct device *dev)
{
	kfree(to_bmc_device(dev));
}

static void cleanup_bmc_work(struct work_struct *work)
{
	struct bmc_device *bmc = container_of(work, struct bmc_device,
					      remove_work);
	int id = bmc->pdev.id; /* Unregister overwrites id */

	platform_device_unregister(&bmc->pdev);
	ida_simple_remove(&ipmi_bmc_ida, id);
}

static void
cleanup_bmc_device(struct kref *ref)
{
	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);

	/*
	 * Remove the platform device in a work queue to avoid issues
	 * with removing the device attributes while reading a device
	 * attribute.
	 */
	schedule_work(&bmc->remove_work);
}
/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	struct bmc_device *bmc = intf->bmc;

	if (!intf->bmc_registered)
		return;

	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	intf->bmc_registered = false;
}

static void ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	mutex_lock(&intf->bmc_reg_mutex);
	__ipmi_bmc_unregister(intf);
	mutex_unlock(&intf->bmc_reg_mutex);
}
/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num)
{
	int rv;
	struct bmc_device *bmc;
	struct bmc_device *old_bmc;

	/*
	 * platform_device_register() can cause bmc_reg_mutex to
	 * be claimed because of the is_visible functions of
	 * the attributes.  Eliminate possible recursion and
	 * release the lock.
	 */
	intf->in_bmc_register = true;
	mutex_unlock(&intf->bmc_reg_mutex);

	/*
	 * Try to find if there is a bmc_device struct
	 * representing the interfaced BMC already.
	 */
	mutex_lock(&ipmidriver_mutex);
	if (guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						    id->product_id,
						    id->device_id);

	/*
	 * If there is already a bmc_device, free the new one,
	 * otherwise register the new BMC device.
	 */
	if (old_bmc) {
		bmc = old_bmc;
		/*
		 * Note: old_bmc already has usecount incremented by
		 * the BMC find functions.
		 */
		intf->bmc = old_bmc;
		mutex_lock(&bmc->dyn_mutex);
		list_add_tail(&intf->bmc_link, &bmc->intfs);
		mutex_unlock(&bmc->dyn_mutex);

		dev_info(intf->si_dev,
			 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	} else {
		bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
		if (!bmc) {
			rv = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(&bmc->intfs);
		mutex_init(&bmc->dyn_mutex);
		INIT_WORK(&bmc->remove_work, cleanup_bmc_work);

		bmc->id = *id;
		bmc->dyn_id_set = 1;
		bmc->dyn_guid_set = guid_set;
		bmc->guid = *guid;
		bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

		bmc->pdev.name = "ipmi_bmc";

		rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
		if (rv < 0) {
			kfree(bmc);
			goto out;
		}

		bmc->pdev.dev.driver = &ipmidriver.driver;
		bmc->pdev.id = rv;
		bmc->pdev.dev.release = release_bmc_device;
		bmc->pdev.dev.type = &bmc_device_type;
		kref_init(&bmc->usecount);

		intf->bmc = bmc;
		mutex_lock(&bmc->dyn_mutex);
		list_add_tail(&intf->bmc_link, &bmc->intfs);
		mutex_unlock(&bmc->dyn_mutex);

		rv = platform_device_register(&bmc->pdev);
		if (rv) {
			dev_err(intf->si_dev,
				"Unable to register bmc device: %d\n",
				rv);
			goto out_list_del;
		}

		dev_info(intf->si_dev,
			 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	}

	/*
	 * Create symlinks from the system interface device to the
	 * bmc device and back.
	 */
	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
	if (rv) {
		dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
		goto out_put_bmc;
	}

	if (intf_num == -1)
		intf_num = intf->intf_num;
	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
	if (!intf->my_dev_name) {
		rv = -ENOMEM;
		dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
			rv);
		goto out_unlink1;
	}

	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
		dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
			rv);
		goto out_free_my_dev_name;
	}

	intf->bmc_registered = true;

out:
	mutex_unlock(&ipmidriver_mutex);
	mutex_lock(&intf->bmc_reg_mutex);
	intf->in_bmc_register = false;
	return rv;


out_free_my_dev_name:
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

out_unlink1:
	sysfs_remove_link(&intf->si_dev->kobj, "bmc");

out_put_bmc:
	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	goto out;

out_list_del:
	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	put_device(&bmc->pdev.dev);
	goto out;
}
static int
send_guid_cmd(struct ipmi_smi *intf, int chan)
{
	struct kernel_ipmi_msg            msg;
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}
static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	struct bmc_device *bmc = intf->bmc;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
		/* Not for me */
		return;

	if (msg->msg.data[0] != 0) {
		/* Error from getting the GUID, the BMC doesn't have one. */
		bmc->dyn_guid_set = 0;
		goto out;
	}

	if (msg->msg.data_len < UUID_SIZE + 1) {
		bmc->dyn_guid_set = 0;
		dev_warn(intf->si_dev,
			 "The GUID response from the BMC was too short, it was %d but should have been %d.  Assuming GUID is not available.\n",
			 msg->msg.data_len, UUID_SIZE + 1);
		goto out;
	}

	import_guid(&bmc->fetch_guid, msg->msg.data + 1);
	/*
	 * Make sure the guid data is available before setting
	 * dyn_guid_set.
	 */
	smp_wmb();
	bmc->dyn_guid_set = 1;
 out:
	wake_up(&intf->waitq);
}
static void __get_guid(struct ipmi_smi *intf)
{
	int rv;
	struct bmc_device *bmc = intf->bmc;

	bmc->dyn_guid_set = 2;
	intf->null_user_handler = guid_handler;
	rv = send_guid_cmd(intf, 0);
	if (rv)
		/* Send failed, no GUID available. */
		bmc->dyn_guid_set = 0;
	else
		wait_event(intf->waitq, bmc->dyn_guid_set != 2);

	/* dyn_guid_set makes the guid data available. */
	smp_rmb();

	intf->null_user_handler = NULL;
}
static int
send_channel_info_cmd(struct ipmi_smi *intf, int chan)
{
	struct kernel_ipmi_msg            msg;
	unsigned char                     data[1];
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
	data[0] = chan;
	msg.data = data;
	msg.data_len = 1;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}
static void
channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;
	int ch;
	unsigned int set = intf->curr_working_cset;
	struct ipmi_channel *chans;

	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */
			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/*
				 * If the MC does not support this
				 * command, that is legal.  We just
				 * assume it has one IPMB at channel
				 * zero.
				 */
				intf->wchannels[set].c[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->wchannels[set].c[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;

				intf->channel_list = intf->wchannels + set;
				intf->channels_ready = true;
				wake_up(&intf->waitq);
				goto out;
			}
			goto next_channel;
		}
		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
			goto next_channel;
		}
		ch = intf->curr_channel;
		chans = intf->wchannels[set].c;
		chans[ch].medium = msg->msg.data[2] & 0x7f;
		chans[ch].protocol = msg->msg.data[3] & 0x1f;

 next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		} else {
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			rv = send_channel_info_cmd(intf, intf->curr_channel);
		}

		if (rv) {
			/* Got an error somehow, just give up. */
			dev_warn(intf->si_dev,
				 "Error sending channel information for channel %d: %d\n",
				 intf->curr_channel, rv);

			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		}
	}
 out:
	return;
}
/*
 * Must be holding intf->bmc_reg_mutex to call this.
 */
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
{
	int rv;

	if (ipmi_version_major(id) > 1
			|| (ipmi_version_major(id) == 1
			    && ipmi_version_minor(id) >= 5)) {
		unsigned int set;

		/*
		 * Start scanning the channels to see what is
		 * available.
		 */
		set = !intf->curr_working_cset;
		intf->curr_working_cset = set;
		memset(&intf->wchannels[set], 0,
		       sizeof(struct ipmi_channel_set));

		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		if (rv) {
			dev_warn(intf->si_dev,
				 "Error sending channel information for channel 0, %d\n",
				 rv);
			return -EIO;
		}

		/* Wait for the channel info to be read. */
		wait_event(intf->waitq, intf->channels_ready);
		intf->null_user_handler = NULL;
	} else {
		unsigned int set = intf->curr_working_cset;

		/* Assume a single IPMB channel at zero. */
		intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
		intf->channel_list = intf->wchannels + set;
		intf->channels_ready = true;
	}

	return 0;
}
static void ipmi_poll(struct ipmi_smi *intf)
{
	if (intf->handlers->poll)
		intf->handlers->poll(intf->send_info);
	/* In case something came in */
	handle_new_recv_msgs(intf);
}

void ipmi_poll_interface(struct ipmi_user *user)
{
	ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);
static void redo_bmc_reg(struct work_struct *work)
{
	struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
					     bmc_reg_work);

	if (!intf->in_shutdown)
		bmc_get_device_id(intf, NULL, NULL, NULL, NULL);

	kref_put(&intf->refcount, intf_free);
}
int ipmi_add_smi(struct module         *owner,
		 const struct ipmi_smi_handlers *handlers,
		 void                  *send_info,
		 struct device         *si_dev,
		 unsigned char         slave_addr)
{
	int              i, j;
	int              rv;
	struct ipmi_smi *intf, *tintf;
	struct list_head *link;
	struct ipmi_device_id id;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;

	rv = init_srcu_struct(&intf->users_srcu);
	if (rv) {
		kfree(intf);
		return rv;
	}

	intf->owner = owner;
	intf->bmc = &intf->tmp_bmc;
	INIT_LIST_HEAD(&intf->bmc->intfs);
	mutex_init(&intf->bmc->dyn_mutex);
	INIT_LIST_HEAD(&intf->bmc_link);
	mutex_init(&intf->bmc_reg_mutex);
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
	intf->si_dev = si_dev;
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->addrinfo[j].lun = 2;
	}
	if (slave_addr != 0)
		intf->addrinfo[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->users);
	intf->handlers = handlers;
	intf->send_info = send_info;
	spin_lock_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	}
	intf->curr_seq = 0;
	spin_lock_init(&intf->waiting_rcv_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
	tasklet_init(&intf->recv_tasklet,
		     smi_recv_tasklet,
		     (unsigned long) intf);
	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
	spin_lock_init(&intf->xmit_msgs_lock);
	INIT_LIST_HEAD(&intf->xmit_msgs);
	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
	spin_lock_init(&intf->events_lock);
	spin_lock_init(&intf->watch_lock);
	atomic_set(&intf->event_waiters, 0);
	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);
	for (i = 0; i < IPMI_NUM_STATS; i++)
		atomic_set(&intf->stats[i], 0);

	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
				ipmi_interfaces_mutex_held()) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		}
		i++;
	}
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add_rcu(&intf->link, &ipmi_interfaces);
	else
		list_add_tail_rcu(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out_err;

	rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
	if (rv) {
		dev_err(si_dev, "Unable to get the device id: %d\n", rv);
		goto out_err_started;
	}

	mutex_lock(&intf->bmc_reg_mutex);
	rv = __scan_channels(intf, &id);
	mutex_unlock(&intf->bmc_reg_mutex);
	if (rv)
		goto out_err_bmc_reg;

	/*
	 * Keep memory order straight for RCU readers.  Make
	 * sure everything else is committed to memory before
	 * setting intf_num to mark the interface valid.
	 */
	smp_wmb();
	intf->intf_num = i;
	mutex_unlock(&ipmi_interfaces_mutex);

	/* After this point the interface is legal to use. */
	call_smi_watchers(i, intf->si_dev);

	return 0;

 out_err_bmc_reg:
	ipmi_bmc_unregister(intf);
 out_err_started:
	if (intf->handlers->shutdown)
		intf->handlers->shutdown(intf->send_info);
 out_err:
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	synchronize_srcu(&ipmi_interfaces_srcu);
	cleanup_srcu_struct(&intf->users_srcu);
	kref_put(&intf->refcount, intf_free);

	return rv;
}
EXPORT_SYMBOL(ipmi_add_smi);
static void deliver_smi_err_response(struct ipmi_smi *intf,
				     struct ipmi_smi_msg *msg,
				     unsigned char err)
{
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = err;
	msg->rsp_size = 3;

	/* It's an error, so it will never requeue, no need to check return. */
	handle_one_recv_msg(intf, msg);
}
static void cleanup_smi_msgs(struct ipmi_smi *intf)
{
	int              i;
	struct seq_table *ent;
	struct ipmi_smi_msg *msg;
	struct list_head *entry;
	struct list_head tmplist;

	/* Clear out our transmit queues and hold the messages. */
	INIT_LIST_HEAD(&tmplist);
	list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
	list_splice_tail(&intf->xmit_msgs, &tmplist);

	/* Current message first, to preserve order */
	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
		/* Wait for the message to clear out. */
		schedule_timeout(1);
	}

	/* No need for locks, the interface is down. */

	/*
	 * Return errors for all pending messages in queue and in the
	 * tables waiting for remote responses.
	 */
	while (!list_empty(&tmplist)) {
		entry = tmplist.next;
		list_del(entry);
		msg = list_entry(entry, struct ipmi_smi_msg, link);
		deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
	}

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		ent = &intf->seq_table[i];
		if (!ent->inuse)
			continue;
		deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
	}
}
void ipmi_unregister_smi(struct ipmi_smi *intf)
{
	struct ipmi_smi_watcher *w;
	int intf_num = intf->intf_num, index;

	mutex_lock(&ipmi_interfaces_mutex);
	intf->intf_num = -1;
	intf->in_shutdown = true;
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	synchronize_srcu(&ipmi_interfaces_srcu);

	/* At this point no users can be added to the interface. */

	/*
	 * Call all the watcher interfaces to tell them that
	 * an interface is going away.
	 */
	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	index = srcu_read_lock(&intf->users_srcu);
	while (!list_empty(&intf->users)) {
		struct ipmi_user *user =
			container_of(list_next_rcu(&intf->users),
				     struct ipmi_user, link);

		_ipmi_destroy_user(user);
	}
	srcu_read_unlock(&intf->users_srcu, index);

	if (intf->handlers->shutdown)
		intf->handlers->shutdown(intf->send_info);

	cleanup_smi_msgs(intf);

	ipmi_bmc_unregister(intf);

	cleanup_srcu_struct(&intf->users_srcu);
	kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);
static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
				   struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg  *recv_msg;

	/*
	 * This is 11, not 10, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_ipmb_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[8],
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &ipmb_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
	else
		ipmi_inc_stat(intf, handled_ipmb_responses);

	return 0;
}
static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
				   struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	struct ipmi_user         *user = NULL;
	struct ipmi_ipmb_addr    *ipmb_addr;
	struct ipmi_recv_msg     *recv_msg;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		ipmi_inc_stat(intf, unhandled_commands);

		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&msg->data[3], 2);
		msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&msg->data[6], 4);
		msg->data_size = 11;

		pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);

		rcu_read_lock();
		if (!intf->in_shutdown) {
			smi_send(intf, intf->handlers, msg, 0);
			/*
			 * We used the message, so return the value
			 * that causes it to not be freed or
			 * queued.
			 */
			rv = -1;
		}
		rcu_read_unlock();
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 10, not 9 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data, &msg->rsp[9],
			       msg->rsp_size - 10);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}
static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct ipmi_lan_addr  lan_addr;
	struct ipmi_recv_msg  *recv_msg;


	/*
	 * This is 13, not 12, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_lan_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &lan_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_lan_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_lan_responses);
	else
		ipmi_inc_stat(intf, handled_lan_responses);

	return 0;
}
static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr          *rcvr;
	int                      rv = 0;
	unsigned char            netfn;
	unsigned char            cmd;
	unsigned char            chan;
	struct ipmi_user         *user = NULL;
	struct ipmi_lan_addr     *lan_addr;
	struct ipmi_recv_msg     *recv_msg;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 12, not 11 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data, &msg->rsp[11],
			       msg->rsp_size - 12);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}
/*
 * This routine will handle "Get Message" command responses with
 * channels that use an OEM Medium. The message format belongs to
 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 */
static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr       *rcvr;
	int                   rv = 0;
	unsigned char         netfn;
	unsigned char         cmd;
	unsigned char         chan;
	struct ipmi_user *user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg  *recv_msg;

	/*
	 * We expect the OEM SW to perform error checking
	 * so we just do some basic sanity checks.
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM Message so the OEM needs to know how
	 * to handle the message. We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */

		rv = 0;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/*
			 * OEM Messages are expected to be delivered via
			 * the system interface to SMS software.  We might
			 * need to visit this again depending on OEM
			 * requirements.
			 */
			smi_addr = ((struct ipmi_system_interface_addr *)
				    &recv_msg->addr);
			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			smi_addr->channel = IPMI_BMC_CHANNEL;
			smi_addr->lun = msg->rsp[0] & 3;

			recv_msg->user = user;
			recv_msg->user_msg_data = NULL;
			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
			recv_msg->msg.netfn = msg->rsp[0] >> 2;
			recv_msg->msg.cmd = msg->rsp[1];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * The message starts at byte 4 which follows the
			 * Channel Byte in the "GET MESSAGE" command.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 4;
			memcpy(recv_msg->msg_data, &msg->rsp[4],
			       msg->rsp_size - 4);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}
static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg  *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}
static int handle_read_event_rsp(struct ipmi_smi *intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head     msgs;
	struct ipmi_user     *user;
	int rv = 0, deliver_count = 0, index;
	unsigned long        flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	index = srcu_read_lock(&intf->users_srcu);
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			srcu_read_unlock(&intf->users_srcu, index);
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&recv_msg->link, &msgs);
	}
	srcu_read_unlock(&intf->users_srcu, index);

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_local_response(intf, recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message, put it in queue if there's
		 * not already too many things in the queue.
		 */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &intf->waiting_events);
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There are too many things in the queue, discard this
		 * message.
		 */
		dev_warn(intf->si_dev,
			 "Event queue full, discarding incoming events\n");
		intf->event_msg_printed = 1;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);

	return rv;
}
static int handle_bmc_rsp(struct ipmi_smi *intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	smi_addr = ((struct ipmi_system_interface_addr *)
		    &recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 2;
	deliver_local_response(intf, recv_msg);

	return 0;
}
/*
 * Handle a received message.  Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

	pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);

	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL)) {

		if (intf->in_shutdown)
			goto free_msg;

		/*
		 * This is the local response to a command send, start
		 * the timer for these.  The user_data will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors, if we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
free_msg:
		requeue = 0;
		goto out;

	} else if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn(intf->si_dev,
			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response is not even
		 * marginally correct.
		 */
		dev_warn(intf->si_dev,
			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
			 (msg->data[0] >> 2) | 1, msg->data[1],
			 msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	}

	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL)) {
		/*
		 * It's a response to a response we sent.  For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		if (!recv_msg)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_local_response(intf, recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		struct ipmi_channel   *chans;

		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (!intf->channels_ready) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		switch (chans[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/* Check for OEM Channels.  Clients had better
			   register for these commands. */
			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (chans[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

 out:
	return requeue;
}
/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
	struct ipmi_smi_msg  *smi_msg;
	unsigned long        flags = 0;
	int                  rv;
	int                  run_to_completion = intf->run_to_completion;

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		if (rv > 0) {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message.  Add the message
			 * back at the head, this is safe because this
			 * tasklet is the only thing that pulls the
			 * messages.
			 */
			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
			break;
		} else {
			if (rv == 0)
				/* Message handled */
				ipmi_free_smi_msg(smi_msg);
			/* If rv < 0, fatal error, del but don't free. */
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);

	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		struct ipmi_user *user;
		int index;

		index = srcu_read_lock(&intf->users_srcu);
		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		srcu_read_unlock(&intf->users_srcu, index);
	}
}
static void smi_recv_tasklet(unsigned long val)
{
	unsigned long flags = 0; /* keep us warning-free. */
	struct ipmi_smi *intf = (struct ipmi_smi *) val;
	int run_to_completion = intf->run_to_completion;
	struct ipmi_smi_msg *newmsg = NULL;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because we may deadlock
	 * because the lower layer is allowed to hold locks while calling
	 * message delivery.
	 */

	rcu_read_lock();

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	if (newmsg)
		intf->handlers->sender(intf->send_info, newmsg);

	rcu_read_unlock();

	handle_new_recv_msgs(intf);
}
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = intf->run_to_completion;

	/*
	 * To preserve message order, we keep a queue and deliver from
	 * a tasklet.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
				       flags);

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_recv_tasklet((unsigned long) intf);
	else
		tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
	if (intf->in_shutdown)
		return;

	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
		  unsigned char seq, long seqid)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
	if (!smi_msg)
		/*
		 * If we can't allocate the message, then just return, we
		 * get 4 retries, so this should be ok.
		 */
		return NULL;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

	pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);

	return smi_msg;
}
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, unsigned long *flags,
			      bool *need_timer)
{
	struct ipmi_recv_msg *msg;

	if (intf->in_shutdown)
		return;

	if (!ent->inuse)
		return;

	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
		*need_timer = true;
		return;
	}

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		*need_timer = true;

		/*
		 * Start with the max timer, set to normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/*
		 * Send the new message.  We send with a zero
		 * priority.  It timed out, I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		if (intf->handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			smi_send(intf, intf->handlers, smi_msg, 0);
		} else
			ipmi_free_smi_msg(smi_msg);

		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}
static bool ipmi_timeout_handler(struct ipmi_smi *intf,
				 unsigned long timeout_period)
{
	struct list_head     timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long        flags;
	int                  i;
	bool                 need_timer = false;

	if (!intf->bmc_registered) {
		kref_get(&intf->refcount);
		if (!schedule_work(&intf->bmc_reg_work)) {
			kref_put(&intf->refcount, intf_free);
			need_timer = true;
		}
	}

	/*
	 * Go through the seq table and find any messages that
	 * have timed out, putting them in the timeouts
	 * list.
	 */
	INIT_LIST_HEAD(&timeouts);
	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->ipmb_maintenance_mode_timeout) {
		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
			intf->ipmb_maintenance_mode_timeout = 0;
		else
			intf->ipmb_maintenance_mode_timeout -= timeout_period;
	}
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
				  &flags, &need_timer);
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	list_for_each_entry_safe(msg, msg2, &timeouts, link)
		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);

	/*
	 * Maintenance mode handling.  Check the timeout
	 * optimistically before we claim the lock.  It may
	 * mean a timeout gets missed occasionally, but that
	 * only means the timeout gets extended by one period
	 * in that case.  No big deal, and it avoids the lock
	 * most of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout
				-= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_enable = false;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	tasklet_schedule(&intf->recv_tasklet);

	return need_timer;
}
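/*
 * The maintenance-mode countdown above is an optimistic
 * check-then-lock idiom; a minimal generic sketch (illustrative only):
 *
 *	if (counter > 0) {			(unlocked peek, may race)
 *		spin_lock_irqsave(&lock, flags);
 *		if (counter > 0) {		(re-check under the lock)
 *			counter -= elapsed;
 *			if (counter <= 0)
 *				disable_feature();
 *		}
 *		spin_unlock_irqrestore(&lock, flags);
 *	}
 *
 * A racing tick can miss one decrement, which only stretches the
 * timeout by a single period.
 */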
static void ipmi_request_event(struct ipmi_smi *intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_enable)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}
static struct timer_list ipmi_timer;

static atomic_t stop_operation;
static void ipmi_timeout(struct timer_list *unused)
{
	struct ipmi_smi *intf;
	bool need_timer = false;
	int index;

	if (atomic_read(&stop_operation))
		return;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			need_timer = true;
		}

		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
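/*
 * Timer lifecycle, as assumed from the code above: ipmi_timeout() only
 * re-arms itself while some interface still has work pending, so the
 * periodic tick stops on its own when idle.  Code that creates new
 * work restarts it through need_waiter(), e.g. (hypothetical caller):
 *
 *	atomic_inc(&intf->event_waiters);
 *	need_waiter(intf);
 *
 * Starting an already-pending timer is harmless, hence the
 * deliberately racy timer_pending() check.
 */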
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);
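/*
 * Ownership sketch inferred from the allocators: every message carries
 * its own ->done() destructor, so releasing code never needs to know
 * how a message was allocated.  Heap messages free themselves through
 * free_smi_msg(); the panic path below substitutes stack messages with
 * dummy handlers.  Typical (illustrative) use:
 *
 *	struct ipmi_smi_msg *m = ipmi_alloc_smi_msg();
 *
 *	if (!m)
 *		return;
 *	...
 *	ipmi_free_smi_msg(m);	(invokes m->done(m))
 */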
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
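/*
 * Reference-counting note (assumed from the kref_put() above): each
 * in-flight receive message pins its ipmi_user, so a user structure
 * cannot be freed while responses destined for it are still queued;
 * the matching kref_get() is taken where msg->user is assigned on the
 * submission path.
 */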
static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}
/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg  smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}
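/*
 * Panic-context rationale, as understood from the code above:
 * interrupts and scheduling cannot be trusted after a panic, so both
 * messages live on the stack with dummy ->done() handlers that only
 * decrement panic_done_count, and completion is driven by busy-polling
 * the interface:
 *
 *	atomic_add(2, &panic_done_count);	(one SMI + one recv msg)
 *	...
 *	while (atomic_read(&panic_done_count) != 0)
 *		ipmi_poll(intf);
 *
 * Nothing on this path may sleep or take a blocking lock.
 */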
static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}
static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}
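/*
 * Both fetchers above are installed through intf->null_user_handler,
 * the hook invoked for responses that have no registered user.  The
 * panic code uses it as a one-shot callback (sketch mirroring
 * send_panic_events() below):
 *
 *	intf->null_user_handler = device_id_fetcher;
 *	ipmi_panic_request_and_wait(intf, &addr, &msg);
 *	...
 *	intf->null_user_handler = NULL;		(always restored)
 */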
static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * panic string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid.  Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events.  There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver.  The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
	while (*p) {
		int size = strlen(p);

		if (size > 11)
			size = 11;
		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */
		/*
		 * Always give 11 bytes, so strncpy will fill
		 * it with zeroes for me.
		 */
		strncpy(data+5, p, 11);
		p += size;

		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}
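/*
 * Worked example of the chunking loop above (illustrative): a panic
 * string "Fatal machine check" (19 bytes) becomes two 16-byte SEL
 * records.  Record 0 carries "Fatal machi" in bytes 5..15, record 1
 * carries "ne check" padded with zeroes; each record is stamped with
 * the originating slave address in data[3] and a sequence number in
 * data[4], so a reader can reassemble the string by sorting on the
 * sequence byte.
 */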
static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}
/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;

	return rv;
}
static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};
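/*
 * Notifier-chain note (inferred): panic notifiers run in descending
 * priority order, so at priority 200 this callback runs ahead of
 * default (priority 0) notifiers, giving the BMC event log a chance to
 * be written while the machine is still in a relatively sane state.
 */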
static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	init_srcu_struct(&ipmi_interfaces_srcu);

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}
static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}
static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		del_timer_sync(&ipmi_timer);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);

		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);
module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
		   " interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");