// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_si.c
 *
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *
 * Copyright 2002 MontaVista Software Inc.
 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */
#define pr_fmt(fmt) "ipmi_si: " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include "ipmi_si.h"
#include "ipmi_si_sm.h"
#include <linux/string.h>
#include <linux/ctype.h>
/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC	10000
#define SI_USEC_PER_JIFFY	(1000000/HZ)
#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC	250 /* .25ms when the SM requests a
				       short timeout */

enum si_intf_state {
	SI_NORMAL,
	SI_GETTING_FLAGS,
	SI_GETTING_EVENTS,
	SI_CLEARING_FLAGS,
	SI_GETTING_MESSAGES,
	SI_CHECKING_ENABLES,
	SI_SETTING_ENABLES
	/* FIXME - add watchdog stuff. */
};

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG		2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1

/* 'invalid' to allow a firmware-specified interface to be disabled */
const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };
static bool initialized;
/*
 * Indexes into stats[] in smi_info below.
 */
enum si_stat_indexes {
	/*
	 * Number of times the driver requested a timer while an operation
	 * was in progress.
	 */
	SI_STAT_short_timeouts = 0,

	/*
	 * Number of times the driver requested a timer while nothing was in
	 * progress.
	 */
	SI_STAT_long_timeouts,

	/* Number of times the interface was idle while being polled. */
	SI_STAT_idles,

	/* Number of interrupts the driver handled. */
	SI_STAT_interrupts,

	/* Number of times the driver got an ATTN from the hardware. */
	SI_STAT_attentions,

	/* Number of times the driver requested flags from the hardware. */
	SI_STAT_flag_fetches,

	/* Number of times the hardware didn't follow the state machine. */
	SI_STAT_hosed_count,

	/* Number of completed messages. */
	SI_STAT_complete_transactions,

	/* Number of IPMI events received from the hardware. */
	SI_STAT_events,

	/* Number of watchdog pretimeouts. */
	SI_STAT_watchdog_pretimeouts,

	/* Number of asynchronous messages received. */
	SI_STAT_incoming_messages,

	/* This *must* remain last, add new values above this. */
	SI_NUM_STATS
};
struct smi_info {
	int			si_num;
	struct ipmi_smi		*intf;
	struct si_sm_data	*si_sm;
	const struct si_sm_handlers *handlers;
	spinlock_t		si_lock;
	struct ipmi_smi_msg	*waiting_msg;
	struct ipmi_smi_msg	*curr_msg;
	enum si_intf_state	si_state;

	/*
	 * Used to handle the various types of I/O that can occur with
	 * IPMI.
	 */
	struct si_sm_io		io;

	/*
	 * Per-OEM handler, called from handle_flags().  Returns 1
	 * when handle_flags() needs to be re-run or 0 indicating it
	 * set si_state itself.
	 */
	int (*oem_data_avail_handler)(struct smi_info *smi_info);

	/*
	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
	 * is set to hold the flags until we are done handling everything
	 * from the flags.
	 */
#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM0_DATA_AVAIL		0x20
#define OEM1_DATA_AVAIL		0x40
#define OEM2_DATA_AVAIL		0x80
#define OEM_DATA_AVAIL		(OEM0_DATA_AVAIL | \
				 OEM1_DATA_AVAIL | \
				 OEM2_DATA_AVAIL)
	unsigned char		msg_flags;

	/* Does the BMC have an event buffer? */
	bool			has_event_buffer;

	/*
	 * If set to true, this will request events the next time the
	 * state machine is idle.
	 */
	atomic_t		req_events;

	/*
	 * If true, run the state machine to completion on every send
	 * call.  Generally used after a panic to make sure stuff goes
	 * out.
	 */
	bool			run_to_completion;

	/* The timer for this si. */
	struct timer_list	si_timer;

	/* This flag is set, if the timer can be set */
	bool			timer_can_start;

	/* This flag is set, if the timer is running (timer_pending() isn't enough) */
	bool			timer_running;

	/* The time (in jiffies) the last timeout occurred at. */
	unsigned long		last_timeout_jiffies;

	/* Are we waiting for the events, pretimeouts, received msgs? */
	atomic_t		need_watch;

	/*
	 * The driver will disable interrupts when it gets into a
	 * situation where it cannot handle messages due to lack of
	 * memory.  Once that situation clears up, it will re-enable
	 * interrupts.
	 */
	bool			interrupt_disabled;

	/*
	 * Does the BMC support events?
	 */
	bool			supports_event_msg_buff;

	/*
	 * Can we disable the receive irq bit in the global enables?
	 * There are currently two forms of brokenness: some systems
	 * cannot disable the bit (which is technically within the
	 * spec but a bad idea) and some systems have the bit forced
	 * to zero even though interrupts work (which is clearly
	 * outside the spec).  The next bool tells which form of
	 * brokenness is present.
	 */
	bool			cannot_disable_irq;

	/*
	 * Some systems are broken and cannot set the irq enable
	 * bit, even if they support interrupts.
	 */
	bool			irq_enable_broken;

	/* Is the driver in maintenance mode? */
	bool			in_maintenance_mode;

	/*
	 * Did we get an attention that we did not handle?
	 */
	bool			got_attn;

	/* From the get device id response... */
	struct ipmi_device_id	device_id;

	/* Have we added the device group to the device? */
	bool			dev_group_added;

	/* Counters and things for the proc filesystem. */
	atomic_t		stats[SI_NUM_STATS];

	struct task_struct	*thread;

	struct list_head	link;
};
#define smi_inc_stat(smi, stat) \
	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
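/*
 * For example, smi_inc_stat(smi, short_timeouts) expands (via token
 * pasting) to atomic_inc(&smi->stats[SI_STAT_short_timeouts]), so the
 * name passed by callers must match one of the SI_STAT_* enumerators
 * above.
 */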
#define IPMI_MAX_INTFS 4
static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;

static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;

static bool unload_when_empty = true;

static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *smi_info);
static void cleanup_ipmi_si(void);
#ifdef DEBUG_TIMING
void debug_timestamp(struct smi_info *smi_info, char *msg)
{
	struct timespec64 t;

	ktime_get_ts64(&t);
	dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n",
		msg, t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(smi_info, x)
#endif
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}
static void deliver_recv_msg(struct smi_info *smi_info,
			     struct ipmi_smi_msg *msg)
{
	/* Deliver the message to the upper layer. */
	ipmi_smi_msg_received(smi_info->intf, msg);
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
		cCode = IPMI_ERR_UNSPECIFIED;
	/* else use it as is */

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = cCode;
	msg->rsp_size = 3;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
	enum si_sm_result rv;

	if (!smi_info->waiting_msg) {
		smi_info->curr_msg = NULL;
		rv = SI_SM_IDLE;
	} else {
		int err;

		smi_info->curr_msg = smi_info->waiting_msg;
		smi_info->waiting_msg = NULL;
		debug_timestamp(smi_info, "Start2");
		err = atomic_notifier_call_chain(&xaction_notifier_list,
						 0, smi_info);
		if (err & NOTIFY_STOP_MASK) {
			rv = SI_SM_CALL_WITHOUT_DELAY;
			goto out;
		}
		err = smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		if (err)
			return_hosed_msg(smi_info, err);

		rv = SI_SM_CALL_WITHOUT_DELAY;
	}
out:
	return rv;
}
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
	if (!smi_info->timer_can_start)
		return;
	smi_info->last_timeout_jiffies = jiffies;
	mod_timer(&smi_info->si_timer, new_val);
	smi_info->timer_running = true;
}
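/*
 * The timer_can_start guard above matters during teardown:
 * stop_timer_and_thread() clears timer_can_start before deleting
 * si_timer, so a late caller cannot re-arm the timer while the
 * interface is being shut down.
 */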
/*
 * Start a new message and (re)start the timer and thread.
 */
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
			  unsigned int size)
{
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

	if (smi_info->thread)
		wake_up_process(smi_info->thread);

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}
static void start_check_enables(struct smi_info *smi_info)
{
	unsigned char msg[2];

	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

	start_new_msg(smi_info, msg, 2);
	smi_info->si_state = SI_CHECKING_ENABLES;
}
static void start_clear_flags(struct smi_info *smi_info)
{
	unsigned char msg[3];

	/* Make sure the watchdog pre-timeout flag is not set at startup. */
	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
	msg[2] = WDT_PRE_TIMEOUT_INT;

	start_new_msg(smi_info, msg, 3);
	smi_info->si_state = SI_CLEARING_FLAGS;
}
static void start_getting_msg_queue(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_MESSAGES;
}
static void start_getting_events(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_EVENTS;
}
/*
 * When we have a situation where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
 * polled until we can allocate some memory.  Once we have some
 * memory, we will re-enable the interrupt.
 *
 * Note that we cannot just use disable_irq(), since the interrupt may
 * be shared.
 */
static inline bool disable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = true;
		start_check_enables(smi_info);
		return true;
	}
	return false;
}

static inline bool enable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = false;
		start_check_enables(smi_info);
		return true;
	}
	return false;
}
/*
 * Allocate a message.  If unable to allocate, start the interrupt
 * disable process and return NULL.  If able to allocate but
 * interrupts are disabled, free the message and return NULL after
 * starting the interrupt enable process.
 */
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	msg = ipmi_alloc_smi_msg();
	if (!msg) {
		if (!disable_si_irq(smi_info))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		ipmi_free_smi_msg(msg);
		msg = NULL;
	}
	return msg;
}
static void handle_flags(struct smi_info *smi_info)
{
retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}
/*
 * Global enables we care about.
 */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
			     IPMI_BMC_EVT_MSG_INTR)

static u8 current_global_enables(struct smi_info *smi_info, u8 base,
				 bool *irq_on)
{
	u8 enables = 0;

	if (smi_info->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
	     smi_info->cannot_disable_irq) &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	if (smi_info->supports_event_msg_buff &&
	    smi_info->io.irq && !smi_info->interrupt_disabled &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

	return enables;
}
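/*
 * For instance, on an interface with a working, enabled irq and a BMC
 * that supports the event message buffer (and neither form of irq
 * brokenness), the computed enables are IPMI_BMC_EVT_MSG_BUFF |
 * IPMI_BMC_RCV_MSG_INTR | IPMI_BMC_EVT_MSG_INTR and *irq_on comes
 * back true.
 */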
static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);

	irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;

	if ((bool)irqstate == irq_on)
		return;

	if (irq_on)
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
	else
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
}
static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	debug_timestamp(smi_info, "Done");
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int  len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			dev_warn_ratelimited(smi_info->io.dev,
				 "Error clearing flags: %2.2x\n", msg[2]);
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting message, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_CHECKING_ENABLES:
	{
		unsigned char msg[4];
		u8 enables;
		bool irq_on;

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn_ratelimited(smi_info->io.dev,
				"Couldn't get irq info: %x,\n"
				"Maybe ok, but ipmi might run very slowly.\n",
				msg[2]);
			smi_info->si_state = SI_NORMAL;
			break;
		}
		enables = current_global_enables(smi_info, 0, &irq_on);
		if (smi_info->io.si_type == SI_BT)
			/* BT has its own interrupt enable bit. */
			check_bt_irq(smi_info, irq_on);
		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
			/* Enables are not correct, fix them. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_SETTING_ENABLES;
		} else if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}

	case SI_SETTING_ENABLES:
	{
		unsigned char msg[4];

		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0)
			dev_warn_ratelimited(smi_info->io.dev,
				 "Could not set the global enables: 0x%x.\n",
				 msg[2]);

		if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}
	}
}
/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

restart:
	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);
		goto restart;
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg, because that
		 * releases the lock.
		 */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
		}
		goto restart;
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
		unsigned char msg[2];

		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
		} else {
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got an attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
			 */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_GET_MSG_FLAGS_CMD;

			start_new_msg(smi_info, msg, 2);
			smi_info->si_state = SI_GETTING_FLAGS;
			goto restart;
		}
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * disabled and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
			start_check_enables(smi_info);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
				goto out;

			start_getting_events(smi_info);
		}
		goto restart;
	}

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* OK if it fails, the timer will just go off. */
		if (del_timer(&smi_info->si_timer))
			smi_info->timer_running = false;
	}

out:
	return si_sm_result;
}
static void check_start_timer_thread(struct smi_info *smi_info)
{
	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		if (smi_info->thread)
			wake_up_process(smi_info->thread);

		start_next_msg(smi_info);
		smi_event_handler(smi_info, 0);
	}
}
static void flush_messages(void *send_info)
{
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;

	/*
	 * Currently, this function is called only in run-to-completion
	 * mode.  This means we are single-threaded, no need for locks.
	 */
	result = smi_event_handler(smi_info, 0);
	while (result != SI_SM_IDLE) {
		udelay(SI_SHORT_TIMEOUT_USEC);
		result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
	}
}
static void sender(void *send_info,
		   struct ipmi_smi_msg *msg)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	debug_timestamp(smi_info, "Enqueue");

	if (smi_info->run_to_completion) {
		/*
		 * If we are running to completion, start it.  Upper
		 * layer will call flush_messages to clear it out.
		 */
		smi_info->waiting_msg = msg;
		return;
	}

	spin_lock_irqsave(&smi_info->si_lock, flags);
	/*
	 * The following two lines don't need to be under the lock for
	 * the lock's sake, but they do need SMP memory barriers to
	 * avoid getting things out of order.  We are already claiming
	 * the lock, anyway, so just do it under the lock to avoid the
	 * ordering problem.
	 */
	BUG_ON(smi_info->waiting_msg);
	smi_info->waiting_msg = msg;
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
	struct smi_info *smi_info = send_info;

	smi_info->run_to_completion = i_run_to_completion;
	if (i_run_to_completion)
		flush_messages(smi_info);
}
/*
 * Use -1 as a special constant to tell that we are spinning in kipmid
 * looking for something and not delaying between checks
 */
#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
					 const struct smi_info *smi_info,
					 ktime_t *busy_until)
{
	unsigned int max_busy_us = 0;

	if (smi_info->si_num < num_max_busy_us)
		max_busy_us = kipmid_max_busy_us[smi_info->si_num];
	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
		*busy_until = IPMI_TIME_NOT_BUSY;
	else if (*busy_until == IPMI_TIME_NOT_BUSY) {
		*busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
	} else {
		if (unlikely(ktime_get() > *busy_until)) {
			*busy_until = IPMI_TIME_NOT_BUSY;
			return false;
		}
	}
	return true;
}
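/*
 * In short: busy-waiting is only attempted while the state machine is
 * asking for a delay and the per-interface kipmid_max_busy_us budget
 * (if configured) has not been exhausted; once the deadline stored in
 * *busy_until passes, the caller falls back to sleeping.
 */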
/*
 * A busy-waiting loop for speeding up IPMI operation.
 *
 * Lousy hardware makes this hard.  This is only enabled for systems
 * that are not BT and do not have interrupts.  It starts spinning
 * when an operation is complete or until max_busy tells it to stop
 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 * Documentation/driver-api/ipmi.rst for details.
 */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
	ktime_t busy_until = IPMI_TIME_NOT_BUSY;

	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		int busy_wait;

		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);

		/*
		 * If the driver is doing something, there is a possible
		 * race with the timer.  If the timer handler sees idle,
		 * and the thread here sees something else, the timer
		 * handler won't restart the timer even though it is
		 * required.  So start it here if necessary.
		 */
		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			; /* do nothing */
		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
			/*
			 * In maintenance mode we run as fast as
			 * possible to allow firmware updates to
			 * complete as fast as possible, but normally
			 * don't bang on the scheduler.
			 */
			if (smi_info->in_maintenance_mode)
				schedule();
			else
				usleep_range(100, 200);
		} else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				schedule_timeout_interruptible(100);
			} else {
				/* Wait to be woken up when we are needed. */
				__set_current_state(TASK_INTERRUPTIBLE);
				schedule();
			}
		} else {
			schedule_timeout_interruptible(1);
		}
	}
	return 0;
}
static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags = 0;
	bool run_to_completion = smi_info->run_to_completion;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	udelay(10);
	if (!run_to_completion)
		spin_lock_irqsave(&smi_info->si_lock, flags);
	smi_event_handler(smi_info, 10);
	if (!run_to_completion)
		spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void request_events(void *send_info)
{
	struct smi_info *smi_info = send_info;

	if (!smi_info->has_event_buffer)
		return;

	atomic_set(&smi_info->req_events, 1);
}
static void set_need_watch(void *send_info, unsigned int watch_mask)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;
	int enable;

	enable = !!watch_mask;

	atomic_set(&smi_info->need_watch, enable);
	spin_lock_irqsave(&smi_info->si_lock, flags);
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void smi_timeout(struct timer_list *t)
{
	struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
	enum si_sm_result smi_result;
	unsigned long flags;
	unsigned long jiffies_now;
	long time_diff;
	long timeout;

	spin_lock_irqsave(&(smi_info->si_lock), flags);
	debug_timestamp(smi_info, "Timer");

	jiffies_now = jiffies;
	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);

	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
		smi_inc_stat(smi_info, long_timeouts);
		goto do_mod_timer;
	}

	/*
	 * If the state machine asks for a short delay, then shorten
	 * the timer timeout.
	 */
	if (smi_result == SI_SM_CALL_WITH_DELAY) {
		smi_inc_stat(smi_info, short_timeouts);
		timeout = jiffies + 1;
	} else {
		smi_inc_stat(smi_info, long_timeouts);
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
	}

do_mod_timer:
	if (smi_result != SI_SM_IDLE)
		smi_mod_timer(smi_info, timeout);
	else
		smi_info->timer_running = false;
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;

	if (smi_info->io.si_type == SI_BT)
		/* We need to clear the IRQ flag for the BT interface. */
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
				     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	smi_inc_stat(smi_info, interrupts);

	debug_timestamp(smi_info, "Interrupt");

	smi_event_handler(smi_info, 0);
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}
static int smi_start_processing(void *send_info,
				struct ipmi_smi *intf)
{
	struct smi_info *new_smi = send_info;
	int enable = 0;

	new_smi->intf = intf;

	/* Set up the timer that drives the interface. */
	timer_setup(&new_smi->si_timer, smi_timeout, 0);
	new_smi->timer_can_start = true;
	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

	/* Try to claim any interrupts. */
	if (new_smi->io.irq_setup) {
		new_smi->io.irq_handler_data = new_smi;
		new_smi->io.irq_setup(&new_smi->io);
	}

	/*
	 * Check if the user forcefully enabled the daemon.
	 */
	if (new_smi->si_num < num_force_kipmid)
		enable = force_kipmid[new_smi->si_num];
	/*
	 * The BT interface is efficient enough to not need a thread,
	 * and there is no need for a thread if we have interrupts.
	 */
	else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
		enable = 1;

	if (enable) {
		new_smi->thread = kthread_run(ipmi_thread, new_smi,
					      "kipmi%d", new_smi->si_num);
		if (IS_ERR(new_smi->thread)) {
			dev_notice(new_smi->io.dev,
				   "Could not start kernel thread due to error %ld, only using timers to drive the interface\n",
				   PTR_ERR(new_smi->thread));
			new_smi->thread = NULL;
		}
	}

	return 0;
}
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
	struct smi_info *smi = send_info;

	data->addr_src = smi->io.addr_source;
	data->dev = smi->io.dev;
	data->addr_info = smi->io.addr_info;
	get_device(smi->io.dev);

	return 0;
}
static void set_maintenance_mode(void *send_info, bool enable)
{
	struct smi_info *smi_info = send_info;

	if (!enable)
		atomic_set(&smi_info->req_events, 0);
	smi_info->in_maintenance_mode = enable;
}
static void shutdown_smi(void *send_info);
static const struct ipmi_smi_handlers handlers = {
	.owner                  = THIS_MODULE,
	.start_processing       = smi_start_processing,
	.shutdown               = shutdown_smi,
	.get_smi_info		= get_smi_info,
	.sender			= sender,
	.request_events		= request_events,
	.set_need_watch		= set_need_watch,
	.set_maintenance_mode   = set_maintenance_mode,
	.set_run_to_completion  = set_run_to_completion,
	.flush_messages		= flush_messages,
	.poll			= poll,
};
static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

static const char * const addr_space_to_str[] = { "i/o", "mem" };

module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid,
		 "Force the kipmi daemon to be enabled (1) or disabled (0).  Normally the IPMI driver auto-detects this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty,
		 "Unload the module if no interfaces are specified or found, default is 1.  Setting to 0 is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
		 "Max time (in microseconds) to busy-wait for IPMI data before sleeping. 0 (default) means to wait forever. Set to 100-500 if kipmid is using up a lot of CPU time.");
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
	if (io->si_type == SI_BT)
		/* Enable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG,
			    IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}

void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
	if (io->si_type == SI_BT)
		/* Disable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
static void std_irq_cleanup(struct si_sm_io *io)
{
	ipmi_irq_start_cleanup(io);
	free_irq(io->irq, io->irq_handler_data);
}
int ipmi_std_irq_setup(struct si_sm_io *io)
{
	int rv;

	if (!io->irq)
		return 0;

	rv = request_irq(io->irq,
			 ipmi_si_irq_handler,
			 IRQF_SHARED,
			 SI_DEVICE_NAME,
			 io->irq_handler_data);
	if (rv) {
		dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n",
			 SI_DEVICE_NAME, io->irq);
		io->irq = 0;
	} else {
		io->irq_cleanup = std_irq_cleanup;
		ipmi_irq_finish_setup(io);
		dev_info(io->dev, "Using irq %d\n", io->irq);
	}

	return rv;
}
static int wait_for_msg_done(struct smi_info *smi_info)
{
	enum si_sm_result smi_result;

	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
	for (;;) {
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, jiffies_to_usecs(1));
		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);
		} else
			break;
	}
	if (smi_result == SI_SM_HOSED)
		/*
		 * We couldn't get the state machine to run, so whatever's at
		 * the port is probably not an IPMI SMI interface.
		 */
		return -ENODEV;

	return 0;
}
static int try_get_dev_id(struct smi_info *smi_info)
{
	unsigned char msg[2];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;
	unsigned int retry_count = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/*
	 * Do a Get Device ID command, since it comes back with some
	 * useful info.
	 */
retry:
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv)
		goto out;

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	/* Check and record info from the get device id, in case we need it. */
	rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
				     resp + 2, resp_len - 2, &smi_info->device_id);
	if (rv) {
		/* record completion code */
		unsigned char cc = *(resp + 2);

		if (cc != IPMI_CC_NO_ERROR &&
		    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
			dev_warn_ratelimited(smi_info->io.dev,
				"BMC returned 0x%2.2x, retry get bmc device id\n",
				cc);
			goto retry;
		}
	}

out:
	kfree(resp);
	return rv;
}
static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from get global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from get global enables command: %ld %x %x %x\n",
			 resp_len, resp[0], resp[1], resp[2]);
		rv = -EINVAL;
		goto out;
	}

	*enables = resp[3];

out:
	kfree(resp);
	return rv;
}
/*
 * Returns 1 if it gets an error from the command.
 */
static int set_global_enables(struct smi_info *smi_info, u8 enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = enables;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from set global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from set global enables command: %ld %x %x\n",
			 resp_len, resp[0], resp[1]);
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		rv = 1;

out:
	kfree(resp);
	return rv;
}
/*
 * Some BMCs do not support clearing the receive irq bit in the global
 * enables (even if they don't support interrupts on the BMC).  Check
 * for this and handle it properly.
 */
static void check_clr_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
			/* Already clear, should work ok. */
			return;

		enables &= ~IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check clearing the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when setting the event buffer bit means
		 * clearing the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
	}
}
/*
 * Some BMCs do not support setting the interrupt bits in the global
 * enables even if they support interrupts.  Clearly bad, but we can
 * compensate.
 */
static void check_set_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	if (!smi_info->io.irq)
		return;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		enables |= IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check setting the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when setting the event buffer bit means
		 * setting the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
		smi_info->irq_enable_broken = true;
	}
}
static int try_enable_event_buffer(struct smi_info *smi_info)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
		/* buffer is already enabled, nothing to do. */
		smi_info->supports_event_msg_buff = true;
		goto out;
	}

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn("Error getting response from set global enables command, the event buffer is not enabled\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		pr_warn("Invalid return from set global enables command, cannot enable the event buffer\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		/*
		 * An error when setting the event buffer bit means
		 * that the event buffer is not supported.
		 */
		rv = -ENOENT;
	else
		smi_info->supports_event_msg_buff = true;

out:
	kfree(resp);
	return rv;
}
#define IPMI_SI_ATTR(name) \
static ssize_t name##_show(struct device *dev,			\
			   struct device_attribute *attr,	\
			   char *buf)				\
{								\
	struct smi_info *smi_info = dev_get_drvdata(dev);	\
								\
	return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \
}								\
static DEVICE_ATTR_RO(name)
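/*
 * For example, IPMI_SI_ATTR(idles) below generates an idles_show()
 * function that reports smi_get_stat(smi_info, idles), plus the
 * matching dev_attr_idles entry used in the attribute array further
 * down.
 */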
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
}
static DEVICE_ATTR_RO(type);
static ssize_t interrupts_enabled_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);
	int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;

	return sysfs_emit(buf, "%d\n", enabled);
}
static DEVICE_ATTR_RO(interrupts_enabled);
IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
IPMI_SI_ATTR(idles);
IPMI_SI_ATTR(interrupts);
IPMI_SI_ATTR(attentions);
IPMI_SI_ATTR(flag_fetches);
IPMI_SI_ATTR(hosed_count);
IPMI_SI_ATTR(complete_transactions);
IPMI_SI_ATTR(events);
IPMI_SI_ATTR(watchdog_pretimeouts);
IPMI_SI_ATTR(incoming_messages);
static ssize_t params_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return sysfs_emit(buf,
			"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
			si_to_str[smi_info->io.si_type],
			addr_space_to_str[smi_info->io.addr_space],
			smi_info->io.addr_data,
			smi_info->io.regspacing,
			smi_info->io.regsize,
			smi_info->io.regshift,
			smi_info->io.irq,
			smi_info->io.slave_addr);
}
static DEVICE_ATTR_RO(params);
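/*
 * Example (illustrative values only): reading the params attribute of
 * a KCS interface at I/O port 0xca2 might produce
 * "kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=32".
 */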
static struct attribute *ipmi_si_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_interrupts_enabled.attr,
	&dev_attr_short_timeouts.attr,
	&dev_attr_long_timeouts.attr,
	&dev_attr_idles.attr,
	&dev_attr_interrupts.attr,
	&dev_attr_attentions.attr,
	&dev_attr_flag_fetches.attr,
	&dev_attr_hosed_count.attr,
	&dev_attr_complete_transactions.attr,
	&dev_attr_events.attr,
	&dev_attr_watchdog_pretimeouts.attr,
	&dev_attr_incoming_messages.attr,
	&dev_attr_params.attr,
	NULL
};

static const struct attribute_group ipmi_si_dev_attr_group = {
	.attrs		= ipmi_si_dev_attrs,
};
/*
 * oem_data_avail_to_receive_msg_avail
 * @info - smi_info structure with msg_flags set
 *
 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
 * Returns 1 indicating need to re-run handle_flags().
 */
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
			       RECEIVE_MSG_AVAIL);
	return 1;
}
/*
 * setup_dell_poweredge_oem_data_handler
 * @info - smi_info.device_id must be populated
 *
 * Systems that match, but have firmware version < 1.40 may assert
 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
 * as RECEIVE_MSG_AVAIL instead.
 *
 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
 * asserts the OEM[012] bits, and if it did, the driver would have to
 * change to handle that properly, we don't actually check for the
 * firmware version.
 * Device ID = 0x20                BMC on PowerEdge 8G servers
 * Device Revision = 0x80
 * Firmware Revision1 = 0x01       BMC version 1.40
 * Firmware Revision2 = 0x40       BCD encoded
 * IPMI Version = 0x51             IPMI 1.5
 * Manufacturer ID = A2 02 00      Dell IANA
 *
 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
 */
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID 0x0002a2
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;

	if (id->manufacturer_id == DELL_IANA_MFR_ID) {
		if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
		    id->ipmi_version    == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		} else if (ipmi_version_major(id) < 1 ||
			   (ipmi_version_major(id) == 1 &&
			    ipmi_version_minor(id) < 5)) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		}
	}
}
1747 static void return_hosed_msg_badsize(struct smi_info
*smi_info
)
1749 struct ipmi_smi_msg
*msg
= smi_info
->curr_msg
;
1751 /* Make it a response */
1752 msg
->rsp
[0] = msg
->data
[0] | 4;
1753 msg
->rsp
[1] = msg
->data
[1];
1754 msg
->rsp
[2] = CANNOT_RETURN_REQUESTED_LENGTH
;
1756 smi_info
->curr_msg
= NULL
;
1757 deliver_recv_msg(smi_info
, msg
);
/*
 * dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be populated
 *
 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
 * not respond to a Get SDR command if the length of the data
 * requested is exactly 0x3A, which leads to command timeouts and no
 * data returned.  This intercepts such commands, and causes userspace
 * callers to try again with a different-sized buffer, which succeeds.
 */

#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
					     unsigned long unused,
					     void *in)
{
	struct smi_info *smi_info = in;
	unsigned char *data = smi_info->curr_msg->data;
	unsigned int size   = smi_info->curr_msg->data_size;

	if (size >= 8 &&
	    (data[0]>>2) == STORAGE_NETFN &&
	    data[1] == STORAGE_CMD_GET_SDR &&
	    data[7] == 0x3A) {
		return_hosed_msg_badsize(smi_info);
		return NOTIFY_STOP;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dell_poweredge_bt_xaction_notifier = {
	.notifier_call	= dell_poweredge_bt_xaction_handler,
};
/*
 * setup_dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.device_id.start_transaction_pre_hook
 * when we know what function to use there.
 */
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;

	if (id->manufacturer_id == DELL_IANA_MFR_ID &&
	    smi_info->io.si_type == SI_BT)
		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
/*
 * setup_oem_data_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.device_id.oem_data_available_handler
 * when we know what function to use there.
 */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
	setup_dell_poweredge_oem_data_handler(smi_info);
}

static void setup_xaction_handlers(struct smi_info *smi_info)
{
	setup_dell_poweredge_bt_xaction_handler(smi_info);
}

static void check_for_broken_irqs(struct smi_info *smi_info)
{
	check_clr_rcv_irq(smi_info);
	check_set_rcv_irq(smi_info);
}
static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
	if (smi_info->thread != NULL) {
		kthread_stop(smi_info->thread);
		smi_info->thread = NULL;
	}

	smi_info->timer_can_start = false;
	del_timer_sync(&smi_info->si_timer);
}
static struct smi_info *find_dup_si(struct smi_info *info)
{
	struct smi_info *e;

	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.addr_space != info->io.addr_space)
			continue;
		if (e->io.addr_data == info->io.addr_data) {
			/*
			 * This is a cheap hack, ACPI doesn't have a defined
			 * slave address but SMBIOS does.  Pick it up from
			 * any source that has it available.
			 */
			if (info->io.slave_addr && !e->io.slave_addr)
				e->io.slave_addr = info->io.slave_addr;
			return e;
		}
	}

	return NULL;
}
int ipmi_si_add_smi(struct si_sm_io *io)
{
	int rv = 0;
	struct smi_info *new_smi, *dup;

	/*
	 * If the user gave us a hard-coded device at the same
	 * address, they presumably want us to use it and not what is
	 * in the firmware.
	 */
	if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD &&
	    ipmi_si_hardcode_match(io->addr_space, io->addr_data)) {
		dev_info(io->dev,
			 "Hard-coded device at this address already exists");
		return -ENODEV;
	}

	if (!io->io_setup) {
		if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
		    io->addr_space == IPMI_IO_ADDR_SPACE) {
			io->io_setup = ipmi_si_port_setup;
		} else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
			io->io_setup = ipmi_si_mem_setup;
		} else {
			return -EINVAL;
		}
	}

	new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
	if (!new_smi)
		return -ENOMEM;
	spin_lock_init(&new_smi->si_lock);

	new_smi->io = *io;

	mutex_lock(&smi_infos_lock);
	dup = find_dup_si(new_smi);
	if (dup) {
		if (new_smi->io.addr_source == SI_ACPI &&
		    dup->io.addr_source == SI_SMBIOS) {
			/* We prefer ACPI over SMBIOS. */
			dev_info(dup->io.dev,
				 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
				 si_to_str[new_smi->io.si_type]);
			cleanup_one_si(dup);
		} else {
			dev_info(new_smi->io.dev,
				 "%s-specified %s state machine: duplicate\n",
				 ipmi_addr_src_to_str(new_smi->io.addr_source),
				 si_to_str[new_smi->io.si_type]);
			rv = -EBUSY;
			kfree(new_smi);
			goto out_err;
		}
	}

	pr_info("Adding %s-specified %s state machine\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_type]);

	list_add_tail(&new_smi->link, &smi_infos);

	if (initialized)
		rv = try_smi_init(new_smi);
out_err:
	mutex_unlock(&smi_infos_lock);
	return rv;
}
/*
 * Try to start up an interface.  Must be called with smi_infos_lock
 * held, primarily to keep smi_num consistent; we only want to do
 * these one at a time.
 */
static int try_smi_init(struct smi_info *new_smi)
{
	int rv = 0;
	int i;

	pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_type],
		addr_space_to_str[new_smi->io.addr_space],
		new_smi->io.addr_data,
		new_smi->io.slave_addr, new_smi->io.irq);

	switch (new_smi->io.si_type) {
	case SI_KCS:
		new_smi->handlers = &kcs_smi_handlers;
		break;

	case SI_SMIC:
		new_smi->handlers = &smic_smi_handlers;
		break;

	case SI_BT:
		new_smi->handlers = &bt_smi_handlers;
		break;

	default:
		/* No support for anything else yet. */
		rv = -EIO;
		goto out_err;
	}

	new_smi->si_num = smi_num;

	/* Do this early so it's available for logs. */
	if (!new_smi->io.dev) {
		pr_err("IPMI interface added with no device\n");
		rv = -EIO;
		goto out_err;
	}

	/* Allocate the state machine's data and initialize it. */
	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
	if (!new_smi->si_sm) {
		rv = -ENOMEM;
		goto out_err;
	}
	new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
							   &new_smi->io);

	/* Now that we know the I/O size, we can set up the I/O. */
	rv = new_smi->io.io_setup(&new_smi->io);
	if (rv) {
		dev_err(new_smi->io.dev, "Could not set up I/O space\n");
		goto out_err;
	}

	/* Do low-level detection first. */
	if (new_smi->handlers->detect(new_smi->si_sm)) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"Interface detection failed\n");
		rv = -ENODEV;
		goto out_err;
	}

	/*
	 * Attempt a get device id command.  If it fails, we probably
	 * don't have a BMC here.
	 */
	rv = try_get_dev_id(new_smi);
	if (rv) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"There appears to be no BMC at this location\n");
		goto out_err;
	}

	setup_oem_data_handler(new_smi);
	setup_xaction_handlers(new_smi);
	check_for_broken_irqs(new_smi);

	new_smi->waiting_msg = NULL;
	new_smi->curr_msg = NULL;
	atomic_set(&new_smi->req_events, 0);
	new_smi->run_to_completion = false;
	for (i = 0; i < SI_NUM_STATS; i++)
		atomic_set(&new_smi->stats[i], 0);

	new_smi->interrupt_disabled = true;
	atomic_set(&new_smi->need_watch, 0);

	rv = try_enable_event_buffer(new_smi);
	if (rv == 0)
		new_smi->has_event_buffer = true;

	/*
	 * Start clearing the flags before we enable interrupts or the
	 * timer to avoid racing with the timer.
	 */
	start_clear_flags(new_smi);

	/*
	 * IRQ is defined to be set when non-zero.  req_events will
	 * cause a global flags check that will enable interrupts.
	 */
	if (new_smi->io.irq) {
		new_smi->interrupt_disabled = false;
		atomic_set(&new_smi->req_events, 1);
	}

	dev_set_drvdata(new_smi->io.dev, new_smi);
	rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to add device attributes: error %d\n",
			rv);
		goto out_err;
	}
	new_smi->dev_group_added = true;

	rv = ipmi_register_smi(&handlers,
			       new_smi,
			       new_smi->io.dev,
			       new_smi->io.slave_addr);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to register device: error %d\n",
			rv);
		goto out_err;
	}

	/* Don't increment till we know we have succeeded. */
	smi_num++;

	dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
		 si_to_str[new_smi->io.si_type]);

	WARN_ON(new_smi->io.dev->init_name != NULL);

out_err:
	if (rv && new_smi->io.io_cleanup) {
		new_smi->io.io_cleanup(&new_smi->io);
		new_smi->io.io_cleanup = NULL;
	}

	if (rv && new_smi->si_sm) {
		kfree(new_smi->si_sm);
		new_smi->si_sm = NULL;
	}

	return rv;
}
static int __init init_ipmi_si(void)
{
	struct smi_info *e;
	enum ipmi_addr_src type = SI_INVALID;

	if (initialized)
		return 0;

	ipmi_hardcode_init();

	pr_info("IPMI System Interface driver\n");

	ipmi_si_platform_init();

	ipmi_si_pci_init();

	ipmi_si_parisc_init();

	/* We prefer devices with interrupts, but in the case of a machine
	   with multiple BMCs we assume that there will be several instances
	   of a given type so if we succeed in registering a type then also
	   try to register everything else of the same type */
	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		/* Try to register a device if it has an IRQ and we either
		   haven't successfully registered a device yet or this
		   device has the same type as one we successfully registered */
		if (e->io.irq && (!type || e->io.addr_source == type)) {
			if (!try_smi_init(e))
				type = e->io.addr_source;
		}
	}

	/* type will only have been set if we successfully registered an si */
	if (type)
		goto skip_fallback_noirq;

	/* Fall back to the preferred device */

	list_for_each_entry(e, &smi_infos, link) {
		if (!e->io.irq && (!type || e->io.addr_source == type)) {
			if (!try_smi_init(e))
				type = e->io.addr_source;
		}
	}

skip_fallback_noirq:
	initialized = true;
	mutex_unlock(&smi_infos_lock);

	if (type)
		return 0;

	mutex_lock(&smi_infos_lock);
	if (unload_when_empty && list_empty(&smi_infos)) {
		mutex_unlock(&smi_infos_lock);
		cleanup_ipmi_si();
		pr_warn("Unable to find any System Interface(s)\n");
		return -ENODEV;
	}
	mutex_unlock(&smi_infos_lock);
	return 0;
}
module_init(init_ipmi_si);
static void wait_msg_processed(struct smi_info *smi_info)
{
	unsigned long jiffies_now;
	long time_diff;

	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		jiffies_now = jiffies;
		time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
			     * SI_USEC_PER_JIFFY);
		smi_event_handler(smi_info, time_diff);
		schedule_timeout_uninterruptible(1);
	}
}
static void shutdown_smi(void *send_info)
{
	struct smi_info *smi_info = send_info;

	if (smi_info->dev_group_added) {
		device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
		smi_info->dev_group_added = false;
	}
	if (smi_info->io.dev)
		dev_set_drvdata(smi_info->io.dev, NULL);

	/*
	 * Make sure that interrupts, the timer and the thread are
	 * stopped and will not run again.
	 */
	smi_info->interrupt_disabled = true;
	if (smi_info->io.irq_cleanup) {
		smi_info->io.irq_cleanup(&smi_info->io);
		smi_info->io.irq_cleanup = NULL;
	}
	stop_timer_and_thread(smi_info);

	/*
	 * Wait until we know that we are out of any interrupt
	 * handlers that might have been running before we freed the
	 * interrupt.
	 */
	synchronize_rcu();

	/*
	 * Timeouts are stopped, now make sure the interrupts are off
	 * in the BMC.  Note that timers and CPU interrupts are off,
	 * so no need for locks.
	 */
	wait_msg_processed(smi_info);

	if (smi_info->handlers)
		disable_si_irq(smi_info);

	wait_msg_processed(smi_info);

	if (smi_info->handlers)
		smi_info->handlers->cleanup(smi_info->si_sm);

	if (smi_info->io.io_cleanup) {
		smi_info->io.io_cleanup(&smi_info->io);
		smi_info->io.io_cleanup = NULL;
	}

	kfree(smi_info->si_sm);
	smi_info->si_sm = NULL;

	smi_info->intf = NULL;
}
/*
 * Must be called with smi_infos_lock held, to serialize the
 * smi_info->intf check.
 */
static void cleanup_one_si(struct smi_info *smi_info)
{
	if (!smi_info)
		return;

	list_del(&smi_info->link);
	ipmi_unregister_smi(smi_info->intf);
	kfree(smi_info);
}

void ipmi_si_remove_by_dev(struct device *dev)
{
	struct smi_info *e;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.dev == dev) {
			cleanup_one_si(e);
			break;
		}
	}
	mutex_unlock(&smi_infos_lock);
}
struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
				      unsigned long addr)
{
	struct smi_info *e, *tmp_e;
	struct device *dev = NULL;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
		if (e->io.addr_space != addr_space)
			continue;
		if (e->io.si_type != si_type)
			continue;
		if (e->io.addr_data == addr) {
			dev = get_device(e->io.dev);
			cleanup_one_si(e);
		}
	}
	mutex_unlock(&smi_infos_lock);

	return dev;
}
static void cleanup_ipmi_si(void)
{
	struct smi_info *e, *tmp_e;

	if (!initialized)
		return;

	ipmi_si_pci_shutdown();

	ipmi_si_parisc_shutdown();

	ipmi_si_platform_shutdown();

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
		cleanup_one_si(e);
	mutex_unlock(&smi_infos_lock);

	ipmi_si_hardcode_exit();
	ipmi_si_hotmod_exit();
}
module_exit(cleanup_ipmi_si);
MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");