Linux-2.6.12-rc2
[linux-2.6/next.git] / drivers / char / ipmi / ipmi_si_intf.c
blob29de259a981e51980cdf01336677043d3037a543
1 /*
2 * ipmi_si.c
4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5 * BT).
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
9 * source@mvista.com
11 * Copyright 2002 MontaVista Software Inc.
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
36 * This file holds the "policy" for the interface to the SMI state
37 * machine. It does the configuration, handles timers and interrupts,
38 * and drives the real SMI state machine.
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <asm/irq.h>
55 #ifdef CONFIG_HIGH_RES_TIMERS
56 #include <linux/hrtime.h>
57 # if defined(schedule_next_int)
58 /* Old high-res timer code, do translations. */
59 # define get_arch_cycles(a) quick_update_jiffies_sub(a)
60 # define arch_cycles_per_jiffy cycles_per_jiffies
61 # endif
62 static inline void add_usec_to_timer(struct timer_list *t, long v)
64 t->sub_expires += nsec_to_arch_cycle(v * 1000);
65 while (t->sub_expires >= arch_cycles_per_jiffy)
67 t->expires++;
68 t->sub_expires -= arch_cycles_per_jiffy;
71 #endif
72 #include <linux/interrupt.h>
73 #include <linux/rcupdate.h>
74 #include <linux/ipmi_smi.h>
75 #include <asm/io.h>
76 #include "ipmi_si_sm.h"
77 #include <linux/init.h>
#define IPMI_SI_VERSION "v33"

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call the periodic timer every 10 ms. */
#define SI_TIMEOUT_TIME_USEC    10000
#define SI_USEC_PER_JIFFY       (1000000/HZ)
#define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC   250 /* .25ms when the SM request a
                                       short timeout */
/* States of the message-handling state machine layered on top of the
   low-level SI state machine. */
enum si_intf_state {
        SI_NORMAL,
        SI_GETTING_FLAGS,
        SI_GETTING_EVENTS,
        SI_CLEARING_FLAGS,
        SI_CLEARING_FLAGS_THEN_SET_IRQ,
        SI_GETTING_MESSAGES,
        SI_ENABLE_INTERRUPTS1,
        SI_ENABLE_INTERRUPTS2
        /* FIXME - add watchdog stuff. */
};
/* The three system-interface flavors this driver can drive. */
enum si_type {
        SI_KCS, SI_SMIC, SI_BT
};
107 struct smi_info
109 ipmi_smi_t intf;
110 struct si_sm_data *si_sm;
111 struct si_sm_handlers *handlers;
112 enum si_type si_type;
113 spinlock_t si_lock;
114 spinlock_t msg_lock;
115 struct list_head xmit_msgs;
116 struct list_head hp_xmit_msgs;
117 struct ipmi_smi_msg *curr_msg;
118 enum si_intf_state si_state;
120 /* Used to handle the various types of I/O that can occur with
121 IPMI */
122 struct si_sm_io io;
123 int (*io_setup)(struct smi_info *info);
124 void (*io_cleanup)(struct smi_info *info);
125 int (*irq_setup)(struct smi_info *info);
126 void (*irq_cleanup)(struct smi_info *info);
127 unsigned int io_size;
129 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
130 is set to hold the flags until we are done handling everything
131 from the flags. */
132 #define RECEIVE_MSG_AVAIL 0x01
133 #define EVENT_MSG_BUFFER_FULL 0x02
134 #define WDT_PRE_TIMEOUT_INT 0x08
135 unsigned char msg_flags;
137 /* If set to true, this will request events the next time the
138 state machine is idle. */
139 atomic_t req_events;
141 /* If true, run the state machine to completion on every send
142 call. Generally used after a panic to make sure stuff goes
143 out. */
144 int run_to_completion;
146 /* The I/O port of an SI interface. */
147 int port;
149 /* The space between start addresses of the two ports. For
150 instance, if the first port is 0xca2 and the spacing is 4, then
151 the second port is 0xca6. */
152 unsigned int spacing;
154 /* zero if no irq; */
155 int irq;
157 /* The timer for this si. */
158 struct timer_list si_timer;
160 /* The time (in jiffies) the last timeout occurred at. */
161 unsigned long last_timeout_jiffies;
163 /* Used to gracefully stop the timer without race conditions. */
164 volatile int stop_operation;
165 volatile int timer_stopped;
167 /* The driver will disable interrupts when it gets into a
168 situation where it cannot handle messages due to lack of
169 memory. Once that situation clears up, it will re-enable
170 interrupts. */
171 int interrupt_disabled;
173 unsigned char ipmi_si_dev_rev;
174 unsigned char ipmi_si_fw_rev_major;
175 unsigned char ipmi_si_fw_rev_minor;
176 unsigned char ipmi_version_major;
177 unsigned char ipmi_version_minor;
179 /* Slave address, could be reported from DMI. */
180 unsigned char slave_addr;
182 /* Counters and things for the proc filesystem. */
183 spinlock_t count_lock;
184 unsigned long short_timeouts;
185 unsigned long long_timeouts;
186 unsigned long timeout_restarts;
187 unsigned long idles;
188 unsigned long interrupts;
189 unsigned long attentions;
190 unsigned long flag_fetches;
191 unsigned long hosed_count;
192 unsigned long complete_transactions;
193 unsigned long events;
194 unsigned long watchdog_pretimeouts;
195 unsigned long incoming_messages;
198 static void si_restart_short_timer(struct smi_info *smi_info);
200 static void deliver_recv_msg(struct smi_info *smi_info,
201 struct ipmi_smi_msg *msg)
203 /* Deliver the message to the upper layer with the lock
204 released. */
205 spin_unlock(&(smi_info->si_lock));
206 ipmi_smi_msg_received(smi_info->intf, msg);
207 spin_lock(&(smi_info->si_lock));
210 static void return_hosed_msg(struct smi_info *smi_info)
212 struct ipmi_smi_msg *msg = smi_info->curr_msg;
214 /* Make it a reponse */
215 msg->rsp[0] = msg->data[0] | 4;
216 msg->rsp[1] = msg->data[1];
217 msg->rsp[2] = 0xFF; /* Unknown error. */
218 msg->rsp_size = 3;
220 smi_info->curr_msg = NULL;
221 deliver_recv_msg(smi_info, msg);
224 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
226 int rv;
227 struct list_head *entry = NULL;
228 #ifdef DEBUG_TIMING
229 struct timeval t;
230 #endif
232 /* No need to save flags, we aleady have interrupts off and we
233 already hold the SMI lock. */
234 spin_lock(&(smi_info->msg_lock));
236 /* Pick the high priority queue first. */
237 if (! list_empty(&(smi_info->hp_xmit_msgs))) {
238 entry = smi_info->hp_xmit_msgs.next;
239 } else if (! list_empty(&(smi_info->xmit_msgs))) {
240 entry = smi_info->xmit_msgs.next;
243 if (!entry) {
244 smi_info->curr_msg = NULL;
245 rv = SI_SM_IDLE;
246 } else {
247 int err;
249 list_del(entry);
250 smi_info->curr_msg = list_entry(entry,
251 struct ipmi_smi_msg,
252 link);
253 #ifdef DEBUG_TIMING
254 do_gettimeofday(&t);
255 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
256 #endif
257 err = smi_info->handlers->start_transaction(
258 smi_info->si_sm,
259 smi_info->curr_msg->data,
260 smi_info->curr_msg->data_size);
261 if (err) {
262 return_hosed_msg(smi_info);
265 rv = SI_SM_CALL_WITHOUT_DELAY;
267 spin_unlock(&(smi_info->msg_lock));
269 return rv;
272 static void start_enable_irq(struct smi_info *smi_info)
274 unsigned char msg[2];
276 /* If we are enabling interrupts, we have to tell the
277 BMC to use them. */
278 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
279 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
281 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
282 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
285 static void start_clear_flags(struct smi_info *smi_info)
287 unsigned char msg[3];
289 /* Make sure the watchdog pre-timeout flag is not set at startup. */
290 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
291 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
292 msg[2] = WDT_PRE_TIMEOUT_INT;
294 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
295 smi_info->si_state = SI_CLEARING_FLAGS;
298 /* When we have a situtaion where we run out of memory and cannot
299 allocate messages, we just leave them in the BMC and run the system
300 polled until we can allocate some memory. Once we have some
301 memory, we will re-enable the interrupt. */
302 static inline void disable_si_irq(struct smi_info *smi_info)
304 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
305 disable_irq_nosync(smi_info->irq);
306 smi_info->interrupt_disabled = 1;
310 static inline void enable_si_irq(struct smi_info *smi_info)
312 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
313 enable_irq(smi_info->irq);
314 smi_info->interrupt_disabled = 0;
318 static void handle_flags(struct smi_info *smi_info)
320 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
321 /* Watchdog pre-timeout */
322 spin_lock(&smi_info->count_lock);
323 smi_info->watchdog_pretimeouts++;
324 spin_unlock(&smi_info->count_lock);
326 start_clear_flags(smi_info);
327 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
328 spin_unlock(&(smi_info->si_lock));
329 ipmi_smi_watchdog_pretimeout(smi_info->intf);
330 spin_lock(&(smi_info->si_lock));
331 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
332 /* Messages available. */
333 smi_info->curr_msg = ipmi_alloc_smi_msg();
334 if (!smi_info->curr_msg) {
335 disable_si_irq(smi_info);
336 smi_info->si_state = SI_NORMAL;
337 return;
339 enable_si_irq(smi_info);
341 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
342 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
343 smi_info->curr_msg->data_size = 2;
345 smi_info->handlers->start_transaction(
346 smi_info->si_sm,
347 smi_info->curr_msg->data,
348 smi_info->curr_msg->data_size);
349 smi_info->si_state = SI_GETTING_MESSAGES;
350 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
351 /* Events available. */
352 smi_info->curr_msg = ipmi_alloc_smi_msg();
353 if (!smi_info->curr_msg) {
354 disable_si_irq(smi_info);
355 smi_info->si_state = SI_NORMAL;
356 return;
358 enable_si_irq(smi_info);
360 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
361 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
362 smi_info->curr_msg->data_size = 2;
364 smi_info->handlers->start_transaction(
365 smi_info->si_sm,
366 smi_info->curr_msg->data,
367 smi_info->curr_msg->data_size);
368 smi_info->si_state = SI_GETTING_EVENTS;
369 } else {
370 smi_info->si_state = SI_NORMAL;
374 static void handle_transaction_done(struct smi_info *smi_info)
376 struct ipmi_smi_msg *msg;
377 #ifdef DEBUG_TIMING
378 struct timeval t;
380 do_gettimeofday(&t);
381 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
382 #endif
383 switch (smi_info->si_state) {
384 case SI_NORMAL:
385 if (!smi_info->curr_msg)
386 break;
388 smi_info->curr_msg->rsp_size
389 = smi_info->handlers->get_result(
390 smi_info->si_sm,
391 smi_info->curr_msg->rsp,
392 IPMI_MAX_MSG_LENGTH);
394 /* Do this here becase deliver_recv_msg() releases the
395 lock, and a new message can be put in during the
396 time the lock is released. */
397 msg = smi_info->curr_msg;
398 smi_info->curr_msg = NULL;
399 deliver_recv_msg(smi_info, msg);
400 break;
402 case SI_GETTING_FLAGS:
404 unsigned char msg[4];
405 unsigned int len;
407 /* We got the flags from the SMI, now handle them. */
408 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
409 if (msg[2] != 0) {
410 /* Error fetching flags, just give up for
411 now. */
412 smi_info->si_state = SI_NORMAL;
413 } else if (len < 4) {
414 /* Hmm, no flags. That's technically illegal, but
415 don't use uninitialized data. */
416 smi_info->si_state = SI_NORMAL;
417 } else {
418 smi_info->msg_flags = msg[3];
419 handle_flags(smi_info);
421 break;
424 case SI_CLEARING_FLAGS:
425 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
427 unsigned char msg[3];
429 /* We cleared the flags. */
430 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
431 if (msg[2] != 0) {
432 /* Error clearing flags */
433 printk(KERN_WARNING
434 "ipmi_si: Error clearing flags: %2.2x\n",
435 msg[2]);
437 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
438 start_enable_irq(smi_info);
439 else
440 smi_info->si_state = SI_NORMAL;
441 break;
444 case SI_GETTING_EVENTS:
446 smi_info->curr_msg->rsp_size
447 = smi_info->handlers->get_result(
448 smi_info->si_sm,
449 smi_info->curr_msg->rsp,
450 IPMI_MAX_MSG_LENGTH);
452 /* Do this here becase deliver_recv_msg() releases the
453 lock, and a new message can be put in during the
454 time the lock is released. */
455 msg = smi_info->curr_msg;
456 smi_info->curr_msg = NULL;
457 if (msg->rsp[2] != 0) {
458 /* Error getting event, probably done. */
459 msg->done(msg);
461 /* Take off the event flag. */
462 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
463 handle_flags(smi_info);
464 } else {
465 spin_lock(&smi_info->count_lock);
466 smi_info->events++;
467 spin_unlock(&smi_info->count_lock);
469 /* Do this before we deliver the message
470 because delivering the message releases the
471 lock and something else can mess with the
472 state. */
473 handle_flags(smi_info);
475 deliver_recv_msg(smi_info, msg);
477 break;
480 case SI_GETTING_MESSAGES:
482 smi_info->curr_msg->rsp_size
483 = smi_info->handlers->get_result(
484 smi_info->si_sm,
485 smi_info->curr_msg->rsp,
486 IPMI_MAX_MSG_LENGTH);
488 /* Do this here becase deliver_recv_msg() releases the
489 lock, and a new message can be put in during the
490 time the lock is released. */
491 msg = smi_info->curr_msg;
492 smi_info->curr_msg = NULL;
493 if (msg->rsp[2] != 0) {
494 /* Error getting event, probably done. */
495 msg->done(msg);
497 /* Take off the msg flag. */
498 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
499 handle_flags(smi_info);
500 } else {
501 spin_lock(&smi_info->count_lock);
502 smi_info->incoming_messages++;
503 spin_unlock(&smi_info->count_lock);
505 /* Do this before we deliver the message
506 because delivering the message releases the
507 lock and something else can mess with the
508 state. */
509 handle_flags(smi_info);
511 deliver_recv_msg(smi_info, msg);
513 break;
516 case SI_ENABLE_INTERRUPTS1:
518 unsigned char msg[4];
520 /* We got the flags from the SMI, now handle them. */
521 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
522 if (msg[2] != 0) {
523 printk(KERN_WARNING
524 "ipmi_si: Could not enable interrupts"
525 ", failed get, using polled mode.\n");
526 smi_info->si_state = SI_NORMAL;
527 } else {
528 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
529 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
530 msg[2] = msg[3] | 1; /* enable msg queue int */
531 smi_info->handlers->start_transaction(
532 smi_info->si_sm, msg, 3);
533 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
535 break;
538 case SI_ENABLE_INTERRUPTS2:
540 unsigned char msg[4];
542 /* We got the flags from the SMI, now handle them. */
543 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
544 if (msg[2] != 0) {
545 printk(KERN_WARNING
546 "ipmi_si: Could not enable interrupts"
547 ", failed set, using polled mode.\n");
549 smi_info->si_state = SI_NORMAL;
550 break;
555 /* Called on timeouts and events. Timeouts should pass the elapsed
556 time, interrupts should pass in zero. */
557 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
558 int time)
560 enum si_sm_result si_sm_result;
562 restart:
563 /* There used to be a loop here that waited a little while
564 (around 25us) before giving up. That turned out to be
565 pointless, the minimum delays I was seeing were in the 300us
566 range, which is far too long to wait in an interrupt. So
567 we just run until the state machine tells us something
568 happened or it needs a delay. */
569 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
570 time = 0;
571 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
573 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
576 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
578 spin_lock(&smi_info->count_lock);
579 smi_info->complete_transactions++;
580 spin_unlock(&smi_info->count_lock);
582 handle_transaction_done(smi_info);
583 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
585 else if (si_sm_result == SI_SM_HOSED)
587 spin_lock(&smi_info->count_lock);
588 smi_info->hosed_count++;
589 spin_unlock(&smi_info->count_lock);
591 /* Do the before return_hosed_msg, because that
592 releases the lock. */
593 smi_info->si_state = SI_NORMAL;
594 if (smi_info->curr_msg != NULL) {
595 /* If we were handling a user message, format
596 a response to send to the upper layer to
597 tell it about the error. */
598 return_hosed_msg(smi_info);
600 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
603 /* We prefer handling attn over new messages. */
604 if (si_sm_result == SI_SM_ATTN)
606 unsigned char msg[2];
608 spin_lock(&smi_info->count_lock);
609 smi_info->attentions++;
610 spin_unlock(&smi_info->count_lock);
612 /* Got a attn, send down a get message flags to see
613 what's causing it. It would be better to handle
614 this in the upper layer, but due to the way
615 interrupts work with the SMI, that's not really
616 possible. */
617 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
618 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
620 smi_info->handlers->start_transaction(
621 smi_info->si_sm, msg, 2);
622 smi_info->si_state = SI_GETTING_FLAGS;
623 goto restart;
626 /* If we are currently idle, try to start the next message. */
627 if (si_sm_result == SI_SM_IDLE) {
628 spin_lock(&smi_info->count_lock);
629 smi_info->idles++;
630 spin_unlock(&smi_info->count_lock);
632 si_sm_result = start_next_msg(smi_info);
633 if (si_sm_result != SI_SM_IDLE)
634 goto restart;
637 if ((si_sm_result == SI_SM_IDLE)
638 && (atomic_read(&smi_info->req_events)))
640 /* We are idle and the upper layer requested that I fetch
641 events, so do so. */
642 unsigned char msg[2];
644 spin_lock(&smi_info->count_lock);
645 smi_info->flag_fetches++;
646 spin_unlock(&smi_info->count_lock);
648 atomic_set(&smi_info->req_events, 0);
649 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
650 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
652 smi_info->handlers->start_transaction(
653 smi_info->si_sm, msg, 2);
654 smi_info->si_state = SI_GETTING_FLAGS;
655 goto restart;
658 return si_sm_result;
661 static void sender(void *send_info,
662 struct ipmi_smi_msg *msg,
663 int priority)
665 struct smi_info *smi_info = send_info;
666 enum si_sm_result result;
667 unsigned long flags;
668 #ifdef DEBUG_TIMING
669 struct timeval t;
670 #endif
672 spin_lock_irqsave(&(smi_info->msg_lock), flags);
673 #ifdef DEBUG_TIMING
674 do_gettimeofday(&t);
675 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
676 #endif
678 if (smi_info->run_to_completion) {
679 /* If we are running to completion, then throw it in
680 the list and run transactions until everything is
681 clear. Priority doesn't matter here. */
682 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
684 /* We have to release the msg lock and claim the smi
685 lock in this case, because of race conditions. */
686 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
688 spin_lock_irqsave(&(smi_info->si_lock), flags);
689 result = smi_event_handler(smi_info, 0);
690 while (result != SI_SM_IDLE) {
691 udelay(SI_SHORT_TIMEOUT_USEC);
692 result = smi_event_handler(smi_info,
693 SI_SHORT_TIMEOUT_USEC);
695 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
696 return;
697 } else {
698 if (priority > 0) {
699 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
700 } else {
701 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
704 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
706 spin_lock_irqsave(&(smi_info->si_lock), flags);
707 if ((smi_info->si_state == SI_NORMAL)
708 && (smi_info->curr_msg == NULL))
710 start_next_msg(smi_info);
711 si_restart_short_timer(smi_info);
713 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
716 static void set_run_to_completion(void *send_info, int i_run_to_completion)
718 struct smi_info *smi_info = send_info;
719 enum si_sm_result result;
720 unsigned long flags;
722 spin_lock_irqsave(&(smi_info->si_lock), flags);
724 smi_info->run_to_completion = i_run_to_completion;
725 if (i_run_to_completion) {
726 result = smi_event_handler(smi_info, 0);
727 while (result != SI_SM_IDLE) {
728 udelay(SI_SHORT_TIMEOUT_USEC);
729 result = smi_event_handler(smi_info,
730 SI_SHORT_TIMEOUT_USEC);
734 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
/* Upper-layer poll hook: give the state machine one pass. */
static void poll(void *send_info)
{
        struct smi_info *smi_info = send_info;

        smi_event_handler(smi_info, 0);
}
744 static void request_events(void *send_info)
746 struct smi_info *smi_info = send_info;
748 atomic_set(&smi_info->req_events, 1);
751 static int initialized = 0;
/* Must be called with interrupts off and with the si_lock held. */
static void si_restart_short_timer(struct smi_info *smi_info)
{
#if defined(CONFIG_HIGH_RES_TIMERS)
        unsigned long flags;
        unsigned long jiffies_now;

        if (del_timer(&(smi_info->si_timer))) {
                /* If we don't delete the timer, then it will go off
                   immediately, anyway.  So we only process if we
                   actually delete the timer. */

                /* We already have irqsave on, so no need for it
                   here. */
                read_lock(&xtime_lock);
                jiffies_now = jiffies;
                smi_info->si_timer.expires = jiffies_now;
                smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now);
                /* FIX: xtime_lock was taken above but never released on
                   this path (compare the HIGH_RES branch of smi_timeout,
                   which does unlock).  Holding a read lock forever would
                   deadlock the timer tick's write_lock of xtime_lock. */
                read_unlock(&xtime_lock);

                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);

                add_timer(&(smi_info->si_timer));
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->timeout_restarts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
        }
#endif
}
782 static void smi_timeout(unsigned long data)
784 struct smi_info *smi_info = (struct smi_info *) data;
785 enum si_sm_result smi_result;
786 unsigned long flags;
787 unsigned long jiffies_now;
788 unsigned long time_diff;
789 #ifdef DEBUG_TIMING
790 struct timeval t;
791 #endif
793 if (smi_info->stop_operation) {
794 smi_info->timer_stopped = 1;
795 return;
798 spin_lock_irqsave(&(smi_info->si_lock), flags);
799 #ifdef DEBUG_TIMING
800 do_gettimeofday(&t);
801 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
802 #endif
803 jiffies_now = jiffies;
804 time_diff = ((jiffies_now - smi_info->last_timeout_jiffies)
805 * SI_USEC_PER_JIFFY);
806 smi_result = smi_event_handler(smi_info, time_diff);
808 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
810 smi_info->last_timeout_jiffies = jiffies_now;
812 if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
813 /* Running with interrupts, only do long timeouts. */
814 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
815 spin_lock_irqsave(&smi_info->count_lock, flags);
816 smi_info->long_timeouts++;
817 spin_unlock_irqrestore(&smi_info->count_lock, flags);
818 goto do_add_timer;
821 /* If the state machine asks for a short delay, then shorten
822 the timer timeout. */
823 if (smi_result == SI_SM_CALL_WITH_DELAY) {
824 spin_lock_irqsave(&smi_info->count_lock, flags);
825 smi_info->short_timeouts++;
826 spin_unlock_irqrestore(&smi_info->count_lock, flags);
827 #if defined(CONFIG_HIGH_RES_TIMERS)
828 read_lock(&xtime_lock);
829 smi_info->si_timer.expires = jiffies;
830 smi_info->si_timer.sub_expires
831 = get_arch_cycles(smi_info->si_timer.expires);
832 read_unlock(&xtime_lock);
833 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
834 #else
835 smi_info->si_timer.expires = jiffies + 1;
836 #endif
837 } else {
838 spin_lock_irqsave(&smi_info->count_lock, flags);
839 smi_info->long_timeouts++;
840 spin_unlock_irqrestore(&smi_info->count_lock, flags);
841 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
842 #if defined(CONFIG_HIGH_RES_TIMERS)
843 smi_info->si_timer.sub_expires = 0;
844 #endif
847 do_add_timer:
848 add_timer(&(smi_info->si_timer));
851 static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
853 struct smi_info *smi_info = data;
854 unsigned long flags;
855 #ifdef DEBUG_TIMING
856 struct timeval t;
857 #endif
859 spin_lock_irqsave(&(smi_info->si_lock), flags);
861 spin_lock(&smi_info->count_lock);
862 smi_info->interrupts++;
863 spin_unlock(&smi_info->count_lock);
865 if (smi_info->stop_operation)
866 goto out;
868 #ifdef DEBUG_TIMING
869 do_gettimeofday(&t);
870 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
871 #endif
872 smi_event_handler(smi_info, 0);
873 out:
874 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
875 return IRQ_HANDLED;
878 static struct ipmi_smi_handlers handlers =
880 .owner = THIS_MODULE,
881 .sender = sender,
882 .request_events = request_events,
883 .set_run_to_completion = set_run_to_completion,
884 .poll = poll,
887 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
888 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
890 #define SI_MAX_PARMS 4
891 #define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
892 static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
893 { NULL, NULL, NULL, NULL };
895 #define DEVICE_NAME "ipmi_si"
897 #define DEFAULT_KCS_IO_PORT 0xca2
898 #define DEFAULT_SMIC_IO_PORT 0xca9
899 #define DEFAULT_BT_IO_PORT 0xe4
900 #define DEFAULT_REGSPACING 1
902 static int si_trydefaults = 1;
903 static char *si_type[SI_MAX_PARMS];
904 #define MAX_SI_TYPE_STR 30
905 static char si_type_str[MAX_SI_TYPE_STR];
906 static unsigned long addrs[SI_MAX_PARMS];
907 static int num_addrs;
908 static unsigned int ports[SI_MAX_PARMS];
909 static int num_ports;
910 static int irqs[SI_MAX_PARMS];
911 static int num_irqs;
912 static int regspacings[SI_MAX_PARMS];
913 static int num_regspacings = 0;
914 static int regsizes[SI_MAX_PARMS];
915 static int num_regsizes = 0;
916 static int regshifts[SI_MAX_PARMS];
917 static int num_regshifts = 0;
918 static int slave_addrs[SI_MAX_PARMS];
919 static int num_slave_addrs = 0;
922 module_param_named(trydefaults, si_trydefaults, bool, 0);
923 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
924 " default scan of the KCS and SMIC interface at the standard"
925 " address");
926 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
927 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
928 " interface separated by commas. The types are 'kcs',"
929 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
930 " the first interface to kcs and the second to bt");
931 module_param_array(addrs, long, &num_addrs, 0);
932 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
933 " addresses separated by commas. Only use if an interface"
934 " is in memory. Otherwise, set it to zero or leave"
935 " it blank.");
936 module_param_array(ports, int, &num_ports, 0);
937 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
938 " addresses separated by commas. Only use if an interface"
939 " is a port. Otherwise, set it to zero or leave"
940 " it blank.");
941 module_param_array(irqs, int, &num_irqs, 0);
942 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
943 " addresses separated by commas. Only use if an interface"
944 " has an interrupt. Otherwise, set it to zero or leave"
945 " it blank.");
946 module_param_array(regspacings, int, &num_regspacings, 0);
947 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
948 " and each successive register used by the interface. For"
949 " instance, if the start address is 0xca2 and the spacing"
950 " is 2, then the second address is at 0xca4. Defaults"
951 " to 1.");
952 module_param_array(regsizes, int, &num_regsizes, 0);
953 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
954 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
955 " 16-bit, 32-bit, or 64-bit register. Use this if you"
956 " the 8-bit IPMI register has to be read from a larger"
957 " register.");
958 module_param_array(regshifts, int, &num_regshifts, 0);
959 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
960 " IPMI register, in bits. For instance, if the data"
961 " is read from a 32-bit word and the IPMI data is in"
962 " bit 8-15, then the shift would be 8");
963 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
964 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
965 " the controller. Normally this is 0x20, but can be"
966 " overridden by this parm. This is an array indexed"
967 " by interface number.");
#define IPMI_MEM_ADDR_SPACE 1
#define IPMI_IO_ADDR_SPACE  2

#if defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_X86) || defined(CONFIG_PCI)
/* Returns 1 if the given base address does not collide with any other
   configured interface's address in the same address space. */
static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
{
        int i;

        for (i = 0; i < SI_MAX_PARMS; ++i) {
                /* Don't check our address. */
                if (i == intf)
                        continue;
                if (si_type[i] != NULL) {
                        if ((addr_space == IPMI_MEM_ADDR_SPACE &&
                             base_addr == addrs[i]) ||
                            (addr_space == IPMI_IO_ADDR_SPACE &&
                             base_addr == ports[i]))
                                return 0;
                }
                else
                        break;
        }

        return 1;
}
#endif
997 static int std_irq_setup(struct smi_info *info)
999 int rv;
1001 if (!info->irq)
1002 return 0;
1004 rv = request_irq(info->irq,
1005 si_irq_handler,
1006 SA_INTERRUPT,
1007 DEVICE_NAME,
1008 info);
1009 if (rv) {
1010 printk(KERN_WARNING
1011 "ipmi_si: %s unable to claim interrupt %d,"
1012 " running polled\n",
1013 DEVICE_NAME, info->irq);
1014 info->irq = 0;
1015 } else {
1016 printk(" Using irq %d\n", info->irq);
1019 return rv;
1022 static void std_irq_cleanup(struct smi_info *info)
1024 if (!info->irq)
1025 return;
1027 free_irq(info->irq, info);
1030 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1032 unsigned int *addr = io->info;
1034 return inb((*addr)+(offset*io->regspacing));
1037 static void port_outb(struct si_sm_io *io, unsigned int offset,
1038 unsigned char b)
1040 unsigned int *addr = io->info;
1042 outb(b, (*addr)+(offset * io->regspacing));
1045 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1047 unsigned int *addr = io->info;
1049 return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
1052 static void port_outw(struct si_sm_io *io, unsigned int offset,
1053 unsigned char b)
1055 unsigned int *addr = io->info;
1057 outw(b << io->regshift, (*addr)+(offset * io->regspacing));
1060 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1062 unsigned int *addr = io->info;
1064 return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
1067 static void port_outl(struct si_sm_io *io, unsigned int offset,
1068 unsigned char b)
1070 unsigned int *addr = io->info;
1072 outl(b << io->regshift, (*addr)+(offset * io->regspacing));
1075 static void port_cleanup(struct smi_info *info)
1077 unsigned int *addr = info->io.info;
1078 int mapsize;
1080 if (addr && (*addr)) {
1081 mapsize = ((info->io_size * info->io.regspacing)
1082 - (info->io.regspacing - info->io.regsize));
1084 release_region (*addr, mapsize);
1086 kfree(info);
1089 static int port_setup(struct smi_info *info)
1091 unsigned int *addr = info->io.info;
1092 int mapsize;
1094 if (!addr || (!*addr))
1095 return -ENODEV;
1097 info->io_cleanup = port_cleanup;
1099 /* Figure out the actual inb/inw/inl/etc routine to use based
1100 upon the register size. */
1101 switch (info->io.regsize) {
1102 case 1:
1103 info->io.inputb = port_inb;
1104 info->io.outputb = port_outb;
1105 break;
1106 case 2:
1107 info->io.inputb = port_inw;
1108 info->io.outputb = port_outw;
1109 break;
1110 case 4:
1111 info->io.inputb = port_inl;
1112 info->io.outputb = port_outl;
1113 break;
1114 default:
1115 printk("ipmi_si: Invalid register size: %d\n",
1116 info->io.regsize);
1117 return -EINVAL;
1120 /* Calculate the total amount of memory to claim. This is an
1121 * unusual looking calculation, but it avoids claiming any
1122 * more memory than it has to. It will claim everything
1123 * between the first address to the end of the last full
1124 * register. */
1125 mapsize = ((info->io_size * info->io.regspacing)
1126 - (info->io.regspacing - info->io.regsize));
1128 if (request_region(*addr, mapsize, DEVICE_NAME) == NULL)
1129 return -EIO;
1130 return 0;
/*
 * Build a smi_info for an interface whose I/O port was supplied via
 * the "ports" module parameter.  Returns 0 and stores the new info in
 * *new_info on success; -ENODEV if no port is configured for this
 * slot or the address is already in use; -ENOMEM on allocation
 * failure.
 */
static int try_init_port(int intf_num, struct smi_info **new_info)
{
	struct smi_info *info;

	if (!ports[intf_num])
		return -ENODEV;

	if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
			      ports[intf_num]))
		return -ENODEV;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	info->io_setup = port_setup;
	info->io.info = &(ports[intf_num]);
	info->io.addr = NULL;
	/* Zero from the module parameters means "use the default". */
	info->io.regspacing = regspacings[intf_num];
	if (!info->io.regspacing)
		info->io.regspacing = DEFAULT_REGSPACING;
	info->io.regsize = regsizes[intf_num];
	if (!info->io.regsize)
		info->io.regsize = DEFAULT_REGSPACING; /* NOTE(review): spacing constant reused as the default size — confirm intended */
	info->io.regshift = regshifts[intf_num];
	info->irq = 0;
	info->irq_setup = NULL;
	*new_info = info;

	if (si_type[intf_num] == NULL)
		si_type[intf_num] = "kcs";

	printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
	       si_type[intf_num], ports[intf_num]);
	return 0;
}
/* Read one byte from a memory-mapped register. */
static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
{
	return readb((io->addr)+(offset * io->regspacing));
}
1178 static void mem_outb(struct si_sm_io *io, unsigned int offset,
1179 unsigned char b)
1181 writeb(b, (io->addr)+(offset * io->regspacing));
1184 static unsigned char mem_inw(struct si_sm_io *io, unsigned int offset)
1186 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1187 && 0xff;
1190 static void mem_outw(struct si_sm_io *io, unsigned int offset,
1191 unsigned char b)
1193 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1196 static unsigned char mem_inl(struct si_sm_io *io, unsigned int offset)
1198 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1199 && 0xff;
1202 static void mem_outl(struct si_sm_io *io, unsigned int offset,
1203 unsigned char b)
1205 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
#ifdef readq
/*
 * 64-bit register accessors, only on architectures providing
 * readq/writeq.
 *
 * Bug fix in mem_inq(): "&& 0xff" (logical AND) replaced with
 * "& 0xff" (bitwise) so the low byte is extracted instead of a 0/1
 * truth value.
 */
static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
{
	return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
		& 0xff;
}

static void mem_outq(struct si_sm_io *io, unsigned int offset,
		     unsigned char b)
{
	writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#endif
/*
 * Undo mem_setup(): unmap the registers and release the memory
 * region, then free the smi_info itself.  Safe to call when the
 * ioremap never happened (info->io.addr == NULL).
 */
static void mem_cleanup(struct smi_info *info)
{
	unsigned long *addr = info->io.info;
	int           mapsize;

	if (info->io.addr) {
		iounmap(info->io.addr);

		/* Same sizing formula as mem_setup() used to claim it. */
		mapsize = ((info->io_size * info->io.regspacing)
			   - (info->io.regspacing - info->io.regsize));

		release_mem_region(*addr, mapsize);
	}
	kfree(info);
}
1238 static int mem_setup(struct smi_info *info)
1240 unsigned long *addr = info->io.info;
1241 int mapsize;
1243 if (!addr || (!*addr))
1244 return -ENODEV;
1246 info->io_cleanup = mem_cleanup;
1248 /* Figure out the actual readb/readw/readl/etc routine to use based
1249 upon the register size. */
1250 switch (info->io.regsize) {
1251 case 1:
1252 info->io.inputb = mem_inb;
1253 info->io.outputb = mem_outb;
1254 break;
1255 case 2:
1256 info->io.inputb = mem_inw;
1257 info->io.outputb = mem_outw;
1258 break;
1259 case 4:
1260 info->io.inputb = mem_inl;
1261 info->io.outputb = mem_outl;
1262 break;
1263 #ifdef readq
1264 case 8:
1265 info->io.inputb = mem_inq;
1266 info->io.outputb = mem_outq;
1267 break;
1268 #endif
1269 default:
1270 printk("ipmi_si: Invalid register size: %d\n",
1271 info->io.regsize);
1272 return -EINVAL;
1275 /* Calculate the total amount of memory to claim. This is an
1276 * unusual looking calculation, but it avoids claiming any
1277 * more memory than it has to. It will claim everything
1278 * between the first address to the end of the last full
1279 * register. */
1280 mapsize = ((info->io_size * info->io.regspacing)
1281 - (info->io.regspacing - info->io.regsize));
1283 if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL)
1284 return -EIO;
1286 info->io.addr = ioremap(*addr, mapsize);
1287 if (info->io.addr == NULL) {
1288 release_mem_region(*addr, mapsize);
1289 return -EIO;
1291 return 0;
/*
 * Build a smi_info for an interface whose memory-mapped address was
 * supplied via the "addrs" module parameter.  Returns 0 and stores
 * the new info in *new_info on success; -ENODEV if no address is
 * configured or it is already in use; -ENOMEM on allocation failure.
 */
static int try_init_mem(int intf_num, struct smi_info **new_info)
{
	struct smi_info *info;

	if (!addrs[intf_num])
		return -ENODEV;

	if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
			      addrs[intf_num]))
		return -ENODEV;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	info->io_setup = mem_setup;
	info->io.info = &addrs[intf_num];
	info->io.addr = NULL;
	/* Zero from the module parameters means "use the default". */
	info->io.regspacing = regspacings[intf_num];
	if (!info->io.regspacing)
		info->io.regspacing = DEFAULT_REGSPACING;
	info->io.regsize = regsizes[intf_num];
	if (!info->io.regsize)
		info->io.regsize = DEFAULT_REGSPACING; /* NOTE(review): spacing constant reused as the default size — confirm intended */
	info->io.regshift = regshifts[intf_num];
	info->irq = 0;
	info->irq_setup = NULL;
	*new_info = info;

	if (si_type[intf_num] == NULL)
		si_type[intf_num] = "kcs";

	printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
	       si_type[intf_num], addrs[intf_num]);
	return 0;
}
1335 #ifdef CONFIG_ACPI_INTERPRETER
1337 #include <linux/acpi.h>
1339 /* Once we get an ACPI failure, we don't try any more, because we go
1340 through the tables sequentially. Once we don't find a table, there
1341 are no more. */
1342 static int acpi_failure = 0;
1344 /* For GPE-type interrupts. */
1345 static u32 ipmi_acpi_gpe(void *context)
1347 struct smi_info *smi_info = context;
1348 unsigned long flags;
1349 #ifdef DEBUG_TIMING
1350 struct timeval t;
1351 #endif
1353 spin_lock_irqsave(&(smi_info->si_lock), flags);
1355 spin_lock(&smi_info->count_lock);
1356 smi_info->interrupts++;
1357 spin_unlock(&smi_info->count_lock);
1359 if (smi_info->stop_operation)
1360 goto out;
1362 #ifdef DEBUG_TIMING
1363 do_gettimeofday(&t);
1364 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1365 #endif
1366 smi_event_handler(smi_info, 0);
1367 out:
1368 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1370 return ACPI_INTERRUPT_HANDLED;
/*
 * Install ipmi_acpi_gpe() as the handler for this interface's GPE
 * number (stored in info->irq).  On failure the interface falls back
 * to polled operation (info->irq is cleared).  Returns 0 on success
 * or when no GPE is configured, -EINVAL on failure.
 */
static int acpi_gpe_irq_setup(struct smi_info *info)
{
	acpi_status status;

	if (!info->irq)
		return 0;

	/* FIXME - is level triggered right? */
	status = acpi_install_gpe_handler(NULL,
					  info->irq,
					  ACPI_GPE_LEVEL_TRIGGERED,
					  &ipmi_acpi_gpe,
					  info);
	if (status != AE_OK) {
		printk(KERN_WARNING
		       "ipmi_si: %s unable to claim ACPI GPE %d,"
		       " running polled\n",
		       DEVICE_NAME, info->irq);
		/* Clearing irq makes the rest of the driver poll. */
		info->irq = 0;
		return -EINVAL;
	} else {
		printk(" Using ACPI GPE %d\n", info->irq);
		return 0;
	}
}
1399 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1401 if (!info->irq)
1402 return;
1404 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
/*
 * ACPI SPMI (Service Processor Management Interface) table layout.
 * Defined at
 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
 */
struct SPMITable {
	s8	Signature[4];	    /* "SPMI" */
	u32	Length;
	u8	Revision;
	u8	Checksum;
	s8	OEMID[6];
	s8	OEMTableID[8];
	s8	OEMRevision[4];
	s8	CreatorID[4];
	s8	CreatorRevision[4];
	u8	InterfaceType;	    /* 1=KCS, 2=SMIC, 3=BT (see try_init_acpi) */
	u8	IPMIlegacy;
	s16	SpecificationRevision;

	/*
	 * Bit 0 - SCI interrupt supported
	 * Bit 1 - I/O APIC/SAPIC
	 */
	u8	InterruptType;

	/* If bit 0 of InterruptType is set, then this is the SCI
	   interrupt in the GPEx_STS register. */
	u8	GPE;

	s16	Reserved;

	/* If bit 1 of InterruptType is set, then this is the I/O
	   APIC/SAPIC interrupt. */
	u32	GlobalSystemInterrupt;

	/* The actual register address. */
	struct acpi_generic_address addr;

	u8	UID[4];

	s8      spmi_id[1]; /* A '\0' terminated array starts here. */
};
/*
 * Probe for an interface described by the ACPI SPMI table.  Fills in
 * the per-interface module-parameter arrays (si_type, regspacings,
 * regsizes, regshifts, addrs/ports) from the table so the rest of the
 * driver sees the same configuration either way.
 *
 * Returns 0 and stores a new smi_info in *new_info on success;
 * -ENODEV when no (further) SPMI table exists or it describes an
 * already-known interface; -EIO for unsupported interface or address
 * types; -ENOMEM on allocation failure.
 */
static int try_init_acpi(int intf_num, struct smi_info **new_info)
{
	struct smi_info  *info;
	acpi_status      status;
	struct SPMITable *spmi;
	char             *io_type;
	u8		 addr_space;

	/* Tables are read sequentially; after one miss there are no more. */
	if (acpi_failure)
		return -ENODEV;

	status = acpi_get_firmware_table("SPMI", intf_num+1,
					 ACPI_LOGICAL_ADDRESSING,
					 (struct acpi_table_header **) &spmi);
	if (status != AE_OK) {
		acpi_failure = 1;
		return -ENODEV;
	}

	if (spmi->IPMIlegacy != 1) {
		printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
		return -ENODEV;
	}

	if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		addr_space = IPMI_MEM_ADDR_SPACE;
	else
		addr_space = IPMI_IO_ADDR_SPACE;
	if (!is_new_interface(-1, addr_space, spmi->addr.address))
		return -ENODEV;

	if (!spmi->addr.register_bit_width) {
		acpi_failure = 1;
		return -ENODEV;
	}

	/* Figure out the interface type. */
	switch (spmi->InterfaceType)
	{
	case 1:	/* KCS */
		si_type[intf_num] = "kcs";
		break;

	case 2:	/* SMIC */
		si_type[intf_num] = "smic";
		break;

	case 3:	/* BT */
		si_type[intf_num] = "bt";
		break;

	default:
		printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
		       spmi->InterfaceType);
		return -EIO;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	if (spmi->InterruptType & 1) {
		/* We've got a GPE interrupt. */
		info->irq = spmi->GPE;
		info->irq_setup = acpi_gpe_irq_setup;
		info->irq_cleanup = acpi_gpe_irq_cleanup;
	} else if (spmi->InterruptType & 2) {
		/* We've got an APIC/SAPIC interrupt. */
		info->irq = spmi->GlobalSystemInterrupt;
		info->irq_setup = std_irq_setup;
		info->irq_cleanup = std_irq_cleanup;
	} else {
		/* Use the default interrupt setting. */
		info->irq = 0;
		info->irq_setup = NULL;
	}

	/* Register geometry comes from the table's generic address. */
	regspacings[intf_num] = spmi->addr.register_bit_width / 8;
	info->io.regspacing = spmi->addr.register_bit_width / 8;
	regsizes[intf_num] = regspacings[intf_num];
	info->io.regsize = regsizes[intf_num];
	regshifts[intf_num] = spmi->addr.register_bit_offset;
	info->io.regshift = regshifts[intf_num];

	if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		io_type = "memory";
		info->io_setup = mem_setup;
		addrs[intf_num] = spmi->addr.address;
		info->io.info = &(addrs[intf_num]);
	} else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		io_type = "I/O";
		info->io_setup = port_setup;
		ports[intf_num] = spmi->addr.address;
		info->io.info = &(ports[intf_num]);
	} else {
		kfree(info);
		printk("ipmi_si: Unknown ACPI I/O Address type\n");
		return -EIO;
	}

	*new_info = info;

	printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
	       si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
	return 0;
}
1558 #endif
1560 #ifdef CONFIG_X86
/* IPMI parameters decoded from a SMBIOS/DMI type-38 record. */
typedef struct dmi_ipmi_data
{
	u8   	      type;	  /* interface type byte (0x01 KCS, 0x02 SMIC, 0x03 BT) */
	u8   	      addr_space; /* IPMI_IO_ADDR_SPACE or IPMI_MEM_ADDR_SPACE */
	unsigned long base_addr;
	u8   	      irq;
	u8            offset;	  /* register spacing in bytes */
	u8            slave_addr;
} dmi_ipmi_data_t;

/* Records filled in by decode_dmi(); dmi_data_entries counts them. */
static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
static int dmi_data_entries;
/* Generic SMBIOS structure header preceding each record. */
typedef struct dmi_header
{
	u8	type;
	u8	length;
	u16	handle;
} dmi_header_t;
/*
 * Decode one SMBIOS type-38 (IPMI Device Information) record into
 * dmi_data[intf_num].  Returns 0 when the record describes a new
 * interface (and bumps dmi_data_entries), -EIO for an unsupported
 * register spacing, -1 if the interface is already known.
 */
static int decode_dmi(dmi_header_t *dm, int intf_num)
{
	u8              *data = (u8 *)dm;
	unsigned long   base_addr;
	u8              reg_spacing;
	u8              len = dm->length;
	dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;

	ipmi_data->type = data[4];

	/* NOTE(review): copies sizeof(unsigned long) bytes of the 8-byte
	   address field — only 4 bytes on 32-bit kernels; confirm this is
	   acceptable for the platforms this path runs on. */
	memcpy(&base_addr, data+8, sizeof(unsigned long));
	if (len >= 0x11) {
		/* Bit 0 of the address distinguishes I/O from memory. */
		if (base_addr & 1) {
			/* I/O */
			base_addr &= 0xFFFE;
			ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
		}
		else {
			/* Memory */
			ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
		}

		/* If bit 4 of byte 0x10 is set, then the lsb for the address
		   is odd. */
		ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);

		ipmi_data->irq = data[0x11];

		/* The top two bits of byte 0x10 hold the register spacing. */
		reg_spacing = (data[0x10] & 0xC0) >> 6;
		switch(reg_spacing){
		case 0x00: /* Byte boundaries */
			ipmi_data->offset = 1;
			break;
		case 0x01: /* 32-bit boundaries */
			ipmi_data->offset = 4;
			break;
		case 0x02: /* 16-byte boundaries */
			ipmi_data->offset = 16;
			break;
		default:
			/* Some other interface, just ignore it. */
			return -EIO;
		}
	} else {
		/* Old DMI spec. */
		ipmi_data->base_addr = base_addr;
		ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
		ipmi_data->offset = 1;
	}

	ipmi_data->slave_addr = data[6];

	if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
		dmi_data_entries++;
		return 0;
	}

	/* Duplicate interface: wipe the slot so it is not reused. */
	memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t));

	return -1;
}
1643 static int dmi_table(u32 base, int len, int num)
1645 u8 *buf;
1646 struct dmi_header *dm;
1647 u8 *data;
1648 int i=1;
1649 int status=-1;
1650 int intf_num = 0;
1652 buf = ioremap(base, len);
1653 if(buf==NULL)
1654 return -1;
1656 data = buf;
1658 while(i<num && (data - buf) < len)
1660 dm=(dmi_header_t *)data;
1662 if((data-buf+dm->length) >= len)
1663 break;
1665 if (dm->type == 38) {
1666 if (decode_dmi(dm, intf_num) == 0) {
1667 intf_num++;
1668 if (intf_num >= SI_MAX_DRIVERS)
1669 break;
1673 data+=dm->length;
1674 while((data-buf) < len && (*data || data[1]))
1675 data++;
1676 data+=2;
1677 i++;
1679 iounmap(buf);
1681 return status;
1684 inline static int dmi_checksum(u8 *buf)
1686 u8 sum=0;
1687 int a;
1689 for(a=0; a<15; a++)
1690 sum+=buf[a];
1691 return (sum==0);
/*
 * Scan the BIOS area (0xF0000-0xFFFFF) for a valid "_DMI_" anchor
 * and, when found, walk the SMBIOS table it points at.  Returns 0 if
 * a table was found and scanned, -1 otherwise.
 */
static int dmi_decode(void)
{
	u8  buf[15];
	u32 fp = 0xF0000;

#ifdef CONFIG_SIMNOW
	return -1;
#endif

	while (fp < 0xFFFFF)
	{
		isa_memcpy_fromio(buf, fp, 15);
		if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf))
		{
			/* Table geometry from the anchor structure. */
			u16 num = buf[13]<<8 | buf[12];
			u16 len = buf[7]<<8 | buf[6];
			u32 base = buf[11]<<24 | buf[10]<<16 | buf[9]<<8 | buf[8];

			if (dmi_table(base, len, num) == 0)
				return 0;
		}
		/* Anchor structures are paragraph (16-byte) aligned. */
		fp += 16;
	}

	return -1;
}
1721 static int try_init_smbios(int intf_num, struct smi_info **new_info)
1723 struct smi_info *info;
1724 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
1725 char *io_type;
1727 if (intf_num >= dmi_data_entries)
1728 return -ENODEV;
1730 switch(ipmi_data->type) {
1731 case 0x01: /* KCS */
1732 si_type[intf_num] = "kcs";
1733 break;
1734 case 0x02: /* SMIC */
1735 si_type[intf_num] = "smic";
1736 break;
1737 case 0x03: /* BT */
1738 si_type[intf_num] = "bt";
1739 break;
1740 default:
1741 return -EIO;
1744 info = kmalloc(sizeof(*info), GFP_KERNEL);
1745 if (!info) {
1746 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
1747 return -ENOMEM;
1749 memset(info, 0, sizeof(*info));
1751 if (ipmi_data->addr_space == 1) {
1752 io_type = "memory";
1753 info->io_setup = mem_setup;
1754 addrs[intf_num] = ipmi_data->base_addr;
1755 info->io.info = &(addrs[intf_num]);
1756 } else if (ipmi_data->addr_space == 2) {
1757 io_type = "I/O";
1758 info->io_setup = port_setup;
1759 ports[intf_num] = ipmi_data->base_addr;
1760 info->io.info = &(ports[intf_num]);
1761 } else {
1762 kfree(info);
1763 printk("ipmi_si: Unknown SMBIOS I/O Address type.\n");
1764 return -EIO;
1767 regspacings[intf_num] = ipmi_data->offset;
1768 info->io.regspacing = regspacings[intf_num];
1769 if (!info->io.regspacing)
1770 info->io.regspacing = DEFAULT_REGSPACING;
1771 info->io.regsize = DEFAULT_REGSPACING;
1772 info->io.regshift = regshifts[intf_num];
1774 info->slave_addr = ipmi_data->slave_addr;
1776 irqs[intf_num] = ipmi_data->irq;
1778 *new_info = info;
1780 printk("ipmi_si: Found SMBIOS-specified state machine at %s"
1781 " address 0x%lx, slave address 0x%x\n",
1782 io_type, (unsigned long)ipmi_data->base_addr,
1783 ipmi_data->slave_addr);
1784 return 0;
1786 #endif /* CONFIG_X86 */
1788 #ifdef CONFIG_PCI
1790 #define PCI_ERMC_CLASSCODE 0x0C0700
1791 #define PCI_HP_VENDOR_ID 0x103C
1792 #define PCI_MMC_DEVICE_ID 0x121A
1793 #define PCI_MMC_ADDR_CW 0x10
1795 /* Avoid more than one attempt to probe pci smic. */
1796 static int pci_smic_checked = 0;
/*
 * Look for an HP PCI management controller (the dedicated MMC device,
 * or an eRMC-class device with an HP subsystem vendor) exposing a
 * SMIC interface, and build a smi_info for it.  Probes at most once
 * per boot (pci_smic_checked).  Returns 0 on success, -ENODEV when no
 * suitable device is found, -ENOMEM on allocation failure.
 */
static int find_pci_smic(int intf_num, struct smi_info **new_info)
{
	struct smi_info *info;
	int             error;
	struct pci_dev  *pci_dev = NULL;
	u16    	        base_addr;
	int             fe_rmc = 0;

	if (pci_smic_checked)
		return -ENODEV;

	pci_smic_checked = 1;

	if ((pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID,
				      NULL)))
		;
	else if ((pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL)) &&
		 pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)
		fe_rmc = 1;
	else
		return -ENODEV;

	error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
	if (error)
	{
		pci_dev_put(pci_dev);
		printk(KERN_ERR
		       "ipmi_si: pci_read_config_word() failed (%d).\n",
		       error);
		return -ENODEV;
	}

	/* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
	if (!(base_addr & 0x0001))
	{
		pci_dev_put(pci_dev);
		printk(KERN_ERR
		       "ipmi_si: memory mapped I/O not supported for PCI"
		       " smic.\n");
		return -ENODEV;
	}

	base_addr &= 0xFFFE;
	if (!fe_rmc)
		/* Data register starts at base address + 1 in eRMC */
		/* NOTE(review): the increment runs when fe_rmc is 0, which
		   reads as the opposite of this comment — confirm which
		   device variant needs the +1. */
		++base_addr;

	if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
		pci_dev_put(pci_dev);
		return -ENODEV;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		pci_dev_put(pci_dev);
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	info->io_setup = port_setup;
	ports[intf_num] = base_addr;
	info->io.info = &(ports[intf_num]);
	info->io.regspacing = regspacings[intf_num];
	if (!info->io.regspacing)
		info->io.regspacing = DEFAULT_REGSPACING;
	info->io.regsize = DEFAULT_REGSPACING;
	info->io.regshift = regshifts[intf_num];

	*new_info = info;

	irqs[intf_num] = pci_dev->irq;
	si_type[intf_num] = "smic";

	printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
	       (long unsigned int) base_addr);

	pci_dev_put(pci_dev);
	return 0;
}
1878 #endif /* CONFIG_PCI */
1880 static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
1882 #ifdef CONFIG_PCI
1883 if (find_pci_smic(intf_num, new_info)==0)
1884 return 0;
1885 #endif
1886 /* Include other methods here. */
1888 return -ENODEV;
1892 static int try_get_dev_id(struct smi_info *smi_info)
1894 unsigned char msg[2];
1895 unsigned char *resp;
1896 unsigned long resp_len;
1897 enum si_sm_result smi_result;
1898 int rv = 0;
1900 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1901 if (!resp)
1902 return -ENOMEM;
1904 /* Do a Get Device ID command, since it comes back with some
1905 useful info. */
1906 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1907 msg[1] = IPMI_GET_DEVICE_ID_CMD;
1908 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1910 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1911 for (;;)
1913 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1914 set_current_state(TASK_UNINTERRUPTIBLE);
1915 schedule_timeout(1);
1916 smi_result = smi_info->handlers->event(
1917 smi_info->si_sm, 100);
1919 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1921 smi_result = smi_info->handlers->event(
1922 smi_info->si_sm, 0);
1924 else
1925 break;
1927 if (smi_result == SI_SM_HOSED) {
1928 /* We couldn't get the state machine to run, so whatever's at
1929 the port is probably not an IPMI SMI interface. */
1930 rv = -ENODEV;
1931 goto out;
1934 /* Otherwise, we got some data. */
1935 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1936 resp, IPMI_MAX_MSG_LENGTH);
1937 if (resp_len < 6) {
1938 /* That's odd, it should be longer. */
1939 rv = -EINVAL;
1940 goto out;
1943 if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1944 /* That's odd, it shouldn't be able to fail. */
1945 rv = -EINVAL;
1946 goto out;
1949 /* Record info from the get device id, in case we need it. */
1950 smi_info->ipmi_si_dev_rev = resp[4] & 0xf;
1951 smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f;
1952 smi_info->ipmi_si_fw_rev_minor = resp[6];
1953 smi_info->ipmi_version_major = resp[7] & 0xf;
1954 smi_info->ipmi_version_minor = resp[7] >> 4;
1956 out:
1957 kfree(resp);
1958 return rv;
1961 static int type_file_read_proc(char *page, char **start, off_t off,
1962 int count, int *eof, void *data)
1964 char *out = (char *) page;
1965 struct smi_info *smi = data;
1967 switch (smi->si_type) {
1968 case SI_KCS:
1969 return sprintf(out, "kcs\n");
1970 case SI_SMIC:
1971 return sprintf(out, "smic\n");
1972 case SI_BT:
1973 return sprintf(out, "bt\n");
1974 default:
1975 return 0;
/*
 * /proc read handler for "si_stats": dump the per-interface counters
 * into the page buffer, one "name: value" pair per line.  Returns the
 * number of bytes written.
 */
static int stat_file_read_proc(char *page, char **start, off_t off,
			       int count, int *eof, void *data)
{
	char            *out = (char *) page;
	struct smi_info *smi = data;

	out += sprintf(out, "interrupts_enabled: %d\n",
		       smi->irq && !smi->interrupt_disabled);
	out += sprintf(out, "short_timeouts: %ld\n",
		       smi->short_timeouts);
	out += sprintf(out, "long_timeouts: %ld\n",
		       smi->long_timeouts);
	out += sprintf(out, "timeout_restarts: %ld\n",
		       smi->timeout_restarts);
	out += sprintf(out, "idles: %ld\n",
		       smi->idles);
	out += sprintf(out, "interrupts: %ld\n",
		       smi->interrupts);
	out += sprintf(out, "attentions: %ld\n",
		       smi->attentions);
	out += sprintf(out, "flag_fetches: %ld\n",
		       smi->flag_fetches);
	out += sprintf(out, "hosed_count: %ld\n",
		       smi->hosed_count);
	out += sprintf(out, "complete_transactions: %ld\n",
		       smi->complete_transactions);
	out += sprintf(out, "events: %ld\n",
		       smi->events);
	out += sprintf(out, "watchdog_pretimeouts: %ld\n",
		       smi->watchdog_pretimeouts);
	out += sprintf(out, "incoming_messages: %ld\n",
		       smi->incoming_messages);

	return (out - ((char *) page));
}
2015 /* Returns 0 if initialized, or negative on an error. */
2016 static int init_one_smi(int intf_num, struct smi_info **smi)
2018 int rv;
2019 struct smi_info *new_smi;
2022 rv = try_init_mem(intf_num, &new_smi);
2023 if (rv)
2024 rv = try_init_port(intf_num, &new_smi);
2025 #ifdef CONFIG_ACPI_INTERPRETER
2026 if ((rv) && (si_trydefaults)) {
2027 rv = try_init_acpi(intf_num, &new_smi);
2029 #endif
2030 #ifdef CONFIG_X86
2031 if ((rv) && (si_trydefaults)) {
2032 rv = try_init_smbios(intf_num, &new_smi);
2034 #endif
2035 if ((rv) && (si_trydefaults)) {
2036 rv = try_init_plug_and_play(intf_num, &new_smi);
2040 if (rv)
2041 return rv;
2043 /* So we know not to free it unless we have allocated one. */
2044 new_smi->intf = NULL;
2045 new_smi->si_sm = NULL;
2046 new_smi->handlers = NULL;
2048 if (!new_smi->irq_setup) {
2049 new_smi->irq = irqs[intf_num];
2050 new_smi->irq_setup = std_irq_setup;
2051 new_smi->irq_cleanup = std_irq_cleanup;
2054 /* Default to KCS if no type is specified. */
2055 if (si_type[intf_num] == NULL) {
2056 if (si_trydefaults)
2057 si_type[intf_num] = "kcs";
2058 else {
2059 rv = -EINVAL;
2060 goto out_err;
2064 /* Set up the state machine to use. */
2065 if (strcmp(si_type[intf_num], "kcs") == 0) {
2066 new_smi->handlers = &kcs_smi_handlers;
2067 new_smi->si_type = SI_KCS;
2068 } else if (strcmp(si_type[intf_num], "smic") == 0) {
2069 new_smi->handlers = &smic_smi_handlers;
2070 new_smi->si_type = SI_SMIC;
2071 } else if (strcmp(si_type[intf_num], "bt") == 0) {
2072 new_smi->handlers = &bt_smi_handlers;
2073 new_smi->si_type = SI_BT;
2074 } else {
2075 /* No support for anything else yet. */
2076 rv = -EIO;
2077 goto out_err;
2080 /* Allocate the state machine's data and initialize it. */
2081 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2082 if (!new_smi->si_sm) {
2083 printk(" Could not allocate state machine memory\n");
2084 rv = -ENOMEM;
2085 goto out_err;
2087 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2088 &new_smi->io);
2090 /* Now that we know the I/O size, we can set up the I/O. */
2091 rv = new_smi->io_setup(new_smi);
2092 if (rv) {
2093 printk(" Could not set up I/O space\n");
2094 goto out_err;
2097 spin_lock_init(&(new_smi->si_lock));
2098 spin_lock_init(&(new_smi->msg_lock));
2099 spin_lock_init(&(new_smi->count_lock));
2101 /* Do low-level detection first. */
2102 if (new_smi->handlers->detect(new_smi->si_sm)) {
2103 rv = -ENODEV;
2104 goto out_err;
2107 /* Attempt a get device id command. If it fails, we probably
2108 don't have a SMI here. */
2109 rv = try_get_dev_id(new_smi);
2110 if (rv)
2111 goto out_err;
2113 /* Try to claim any interrupts. */
2114 new_smi->irq_setup(new_smi);
2116 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2117 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2118 new_smi->curr_msg = NULL;
2119 atomic_set(&new_smi->req_events, 0);
2120 new_smi->run_to_completion = 0;
2122 new_smi->interrupt_disabled = 0;
2123 new_smi->timer_stopped = 0;
2124 new_smi->stop_operation = 0;
2126 /* Start clearing the flags before we enable interrupts or the
2127 timer to avoid racing with the timer. */
2128 start_clear_flags(new_smi);
2129 /* IRQ is defined to be set when non-zero. */
2130 if (new_smi->irq)
2131 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2133 /* The ipmi_register_smi() code does some operations to
2134 determine the channel information, so we must be ready to
2135 handle operations before it is called. This means we have
2136 to stop the timer if we get an error after this point. */
2137 init_timer(&(new_smi->si_timer));
2138 new_smi->si_timer.data = (long) new_smi;
2139 new_smi->si_timer.function = smi_timeout;
2140 new_smi->last_timeout_jiffies = jiffies;
2141 new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
2142 add_timer(&(new_smi->si_timer));
2144 rv = ipmi_register_smi(&handlers,
2145 new_smi,
2146 new_smi->ipmi_version_major,
2147 new_smi->ipmi_version_minor,
2148 new_smi->slave_addr,
2149 &(new_smi->intf));
2150 if (rv) {
2151 printk(KERN_ERR
2152 "ipmi_si: Unable to register device: error %d\n",
2153 rv);
2154 goto out_err_stop_timer;
2157 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2158 type_file_read_proc, NULL,
2159 new_smi, THIS_MODULE);
2160 if (rv) {
2161 printk(KERN_ERR
2162 "ipmi_si: Unable to create proc entry: %d\n",
2163 rv);
2164 goto out_err_stop_timer;
2167 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2168 stat_file_read_proc, NULL,
2169 new_smi, THIS_MODULE);
2170 if (rv) {
2171 printk(KERN_ERR
2172 "ipmi_si: Unable to create proc entry: %d\n",
2173 rv);
2174 goto out_err_stop_timer;
2177 *smi = new_smi;
2179 printk(" IPMI %s interface initialized\n", si_type[intf_num]);
2181 return 0;
2183 out_err_stop_timer:
2184 new_smi->stop_operation = 1;
2186 /* Wait for the timer to stop. This avoids problems with race
2187 conditions removing the timer here. */
2188 while (!new_smi->timer_stopped) {
2189 set_current_state(TASK_UNINTERRUPTIBLE);
2190 schedule_timeout(1);
2193 out_err:
2194 if (new_smi->intf)
2195 ipmi_unregister_smi(new_smi->intf);
2197 new_smi->irq_cleanup(new_smi);
2199 /* Wait until we know that we are out of any interrupt
2200 handlers might have been running before we freed the
2201 interrupt. */
2202 synchronize_kernel();
2204 if (new_smi->si_sm) {
2205 if (new_smi->handlers)
2206 new_smi->handlers->cleanup(new_smi->si_sm);
2207 kfree(new_smi->si_sm);
2209 new_smi->io_cleanup(new_smi);
2211 return rv;
/*
 * Module init: parse the si_type parameter list, probe interface 0
 * (falling back through KCS, SMIC and BT at their default I/O ports
 * when defaults are allowed and no port was given), then probe any
 * further configured interfaces.  Fails with -ENODEV only when no
 * interface at all came up.
 */
static __init int init_ipmi_si(void)
{
	int  rv = 0;
	int  pos = 0;
	int  i;
	char *str;

	/* Only run once. */
	if (initialized)
		return 0;
	initialized = 1;

	/* Parse out the si_type string into its components. */
	str = si_type_str;
	if (*str != '\0') {
		/* Split on commas, in place, up to SI_MAX_PARMS entries. */
		for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) {
			si_type[i] = str;
			str = strchr(str, ',');
			if (str) {
				*str = '\0';
				str++;
			} else {
				break;
			}
		}
	}

	printk(KERN_INFO "IPMI System Interface driver version "
	       IPMI_SI_VERSION);
	if (kcs_smi_handlers.version)
		printk(", KCS version %s", kcs_smi_handlers.version);
	if (smic_smi_handlers.version)
		printk(", SMIC version %s", smic_smi_handlers.version);
	if (bt_smi_handlers.version)
		printk(", BT version %s", bt_smi_handlers.version);
	printk("\n");

#ifdef CONFIG_X86
	/* Pre-populate dmi_data[] from the SMBIOS tables. */
	dmi_decode();
#endif

	rv = init_one_smi(0, &(smi_infos[pos]));
	if (rv && !ports[0] && si_trydefaults) {
		/* If we are trying defaults and the initial port is
		   not set, then set it. */
		si_type[0] = "kcs";
		ports[0] = DEFAULT_KCS_IO_PORT;
		rv = init_one_smi(0, &(smi_infos[pos]));
		if (rv) {
			/* No KCS - try SMIC */
			si_type[0] = "smic";
			ports[0] = DEFAULT_SMIC_IO_PORT;
			rv = init_one_smi(0, &(smi_infos[pos]));
		}
		if (rv) {
			/* No SMIC - try BT */
			si_type[0] = "bt";
			ports[0] = DEFAULT_BT_IO_PORT;
			rv = init_one_smi(0, &(smi_infos[pos]));
		}
	}
	if (rv == 0)
		pos++;

	for (i=1; i < SI_MAX_PARMS; i++) {
		rv = init_one_smi(i, &(smi_infos[pos]));
		if (rv == 0)
			pos++;
	}

	if (smi_infos[0] == NULL) {
		printk("ipmi_si: Unable to find any System Interface(s)\n");
		return -ENODEV;
	}

	return 0;
}
module_init(init_ipmi_si);
/*
 * Tear down one initialized interface: signal shutdown under the
 * locks, release the interrupt, wait for the timer and any pending
 * transaction to drain, then unregister and free everything.  NULL
 * entries are ignored.
 */
static void __exit cleanup_one_si(struct smi_info *to_clean)
{
	int           rv;
	unsigned long flags;

	if (! to_clean)
		return;

	/* Tell the timer and interrupt handlers that we are shutting
	   down. */
	spin_lock_irqsave(&(to_clean->si_lock), flags);
	spin_lock(&(to_clean->msg_lock));

	to_clean->stop_operation = 1;

	to_clean->irq_cleanup(to_clean);

	spin_unlock(&(to_clean->msg_lock));
	spin_unlock_irqrestore(&(to_clean->si_lock), flags);

	/* Wait until we know that we are out of any interrupt
	   handlers might have been running before we freed the
	   interrupt. */
	synchronize_kernel();

	/* Wait for the timer to stop.  This avoids problems with race
	   conditions removing the timer here. */
	while (!to_clean->timer_stopped) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}

	/* Interrupts and timeouts are stopped, now make sure the
	   interface is in a clean state. */
	while ((to_clean->curr_msg) || (to_clean->si_state != SI_NORMAL)) {
		poll(to_clean);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}

	rv = ipmi_unregister_smi(to_clean->intf);
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to unregister device: errno=%d\n",
		       rv);
	}

	to_clean->handlers->cleanup(to_clean->si_sm);

	kfree(to_clean->si_sm);

	/* io_cleanup also frees to_clean itself. */
	to_clean->io_cleanup(to_clean);
}
2346 static __exit void cleanup_ipmi_si(void)
2348 int i;
2350 if (!initialized)
2351 return;
2353 for (i=0; i<SI_MAX_DRIVERS; i++) {
2354 cleanup_one_si(smi_infos[i]);
2357 module_exit(cleanup_ipmi_si);
2359 MODULE_LICENSE("GPL");