1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  *
13  *  This program is free software; you can redistribute it and/or modify it
14  *  under the terms of the GNU General Public License as published by the
15  *  Free Software Foundation; either version 2 of the License, or (at your
16  *  option) any later version.
17  *
18  *
19  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  *  You should have received a copy of the GNU General Public License along
31  *  with this program; if not, write to the Free Software Foundation, Inc.,
32  *  675 Mass Ave, Cambridge, MA 02139, USA.
33  */
34
35 /*
36  * This file holds the "policy" for the interface to the SMI state
37  * machine.  It does the configuration, handles timers and interrupts,
38  * and drives the real SMI state machine.
39  */
40
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <asm/irq.h>
55 #ifdef CONFIG_HIGH_RES_TIMERS
56 #include <linux/hrtime.h>
57 # if defined(schedule_next_int)
58 /* Old high-res timer code, do translations. */
59 #  define get_arch_cycles(a) quick_update_jiffies_sub(a)
60 #  define arch_cycles_per_jiffy cycles_per_jiffies
61 # endif
62 static inline void add_usec_to_timer(struct timer_list *t, long v)
63 {
64         t->sub_expires += nsec_to_arch_cycle(v * 1000);
65         while (t->sub_expires >= arch_cycles_per_jiffy)
66         {
67                 t->expires++;
68                 t->sub_expires -= arch_cycles_per_jiffy;
69         }
70 }
71 #endif
72 #include <linux/interrupt.h>
73 #include <linux/rcupdate.h>
74 #include <linux/ipmi_smi.h>
75 #include <asm/io.h>
76 #include "ipmi_si_sm.h"
77 #include <linux/init.h>
78 #include <linux/dmi.h>
79
80 #define IPMI_SI_VERSION "v33"
81
82 /* Measure times between events in the driver. */
83 #undef DEBUG_TIMING
84
85 /* Call every 10 ms. */
86 #define SI_TIMEOUT_TIME_USEC    10000
87 #define SI_USEC_PER_JIFFY       (1000000/HZ)
88 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
89 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
90                                        short timeout */
91
92 enum si_intf_state {
93         SI_NORMAL,
94         SI_GETTING_FLAGS,
95         SI_GETTING_EVENTS,
96         SI_CLEARING_FLAGS,
97         SI_CLEARING_FLAGS_THEN_SET_IRQ,
98         SI_GETTING_MESSAGES,
99         SI_ENABLE_INTERRUPTS1,
100         SI_ENABLE_INTERRUPTS2
101         /* FIXME - add watchdog stuff. */
102 };
103
104 /* Some BT-specific defines we need here. */
105 #define IPMI_BT_INTMASK_REG             2
106 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
107 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
108
109 enum si_type {
110     SI_KCS, SI_SMIC, SI_BT
111 };
112
113 struct smi_info
114 {
115         ipmi_smi_t             intf;
116         struct si_sm_data      *si_sm;
117         struct si_sm_handlers  *handlers;
118         enum si_type           si_type;
119         spinlock_t             si_lock;
120         spinlock_t             msg_lock;
121         struct list_head       xmit_msgs;
122         struct list_head       hp_xmit_msgs;
123         struct ipmi_smi_msg    *curr_msg;
124         enum si_intf_state     si_state;
125
126         /* Used to handle the various types of I/O that can occur with
127            IPMI */
128         struct si_sm_io io;
129         int (*io_setup)(struct smi_info *info);
130         void (*io_cleanup)(struct smi_info *info);
131         int (*irq_setup)(struct smi_info *info);
132         void (*irq_cleanup)(struct smi_info *info);
133         unsigned int io_size;
134
135         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
136            is set to hold the flags until we are done handling everything
137            from the flags. */
138 #define RECEIVE_MSG_AVAIL       0x01
139 #define EVENT_MSG_BUFFER_FULL   0x02
140 #define WDT_PRE_TIMEOUT_INT     0x08
141         unsigned char       msg_flags;
142
143         /* If set to true, this will request events the next time the
144            state machine is idle. */
145         atomic_t            req_events;
146
147         /* If true, run the state machine to completion on every send
148            call.  Generally used after a panic to make sure stuff goes
149            out. */
150         int                 run_to_completion;
151
152         /* The I/O port of an SI interface. */
153         int                 port;
154
155         /* The space between start addresses of the two ports.  For
156            instance, if the first port is 0xca2 and the spacing is 4, then
157            the second port is 0xca6. */
158         unsigned int        spacing;
159
160         /* Zero if no IRQ. */
161         int                 irq;
162
163         /* The timer for this si. */
164         struct timer_list   si_timer;
165
166         /* The time (in jiffies) the last timeout occurred at. */
167         unsigned long       last_timeout_jiffies;
168
169         /* Used to gracefully stop the timer without race conditions. */
170         volatile int        stop_operation;
171         volatile int        timer_stopped;
172
173         /* The driver will disable interrupts when it gets into a
174            situation where it cannot handle messages due to lack of
175            memory.  Once that situation clears up, it will re-enable
176            interrupts. */
177         int interrupt_disabled;
178
179         unsigned char ipmi_si_dev_rev;
180         unsigned char ipmi_si_fw_rev_major;
181         unsigned char ipmi_si_fw_rev_minor;
182         unsigned char ipmi_version_major;
183         unsigned char ipmi_version_minor;
184
185         /* Slave address, could be reported from DMI. */
186         unsigned char slave_addr;
187
188         /* Counters and things for the proc filesystem. */
189         spinlock_t count_lock;
190         unsigned long short_timeouts;
191         unsigned long long_timeouts;
192         unsigned long timeout_restarts;
193         unsigned long idles;
194         unsigned long interrupts;
195         unsigned long attentions;
196         unsigned long flag_fetches;
197         unsigned long hosed_count;
198         unsigned long complete_transactions;
199         unsigned long events;
200         unsigned long watchdog_pretimeouts;
201         unsigned long incoming_messages;
202 };
203
204 static void si_restart_short_timer(struct smi_info *smi_info);
205
206 static void deliver_recv_msg(struct smi_info *smi_info,
207                              struct ipmi_smi_msg *msg)
208 {
209         /* Deliver the message to the upper layer with the lock
210            released. */
211         spin_unlock(&(smi_info->si_lock));
212         ipmi_smi_msg_received(smi_info->intf, msg);
213         spin_lock(&(smi_info->si_lock));
214 }
215
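/* Turn the current request into an error response (completion code 0xFF,
   "unknown error") and hand it back to the upper layer.  Used when the
   interface is hosed or a transaction could not be started. */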
216 static void return_hosed_msg(struct smi_info *smi_info)
217 {
218         struct ipmi_smi_msg *msg = smi_info->curr_msg;
219
220         /* Make it a response */
221         msg->rsp[0] = msg->data[0] | 4;
222         msg->rsp[1] = msg->data[1];
223         msg->rsp[2] = 0xFF; /* Unknown error. */
224         msg->rsp_size = 3;
225
226         smi_info->curr_msg = NULL;
227         deliver_recv_msg(smi_info, msg);
228 }
229
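/* Pull the next message off the transmit queues (high-priority queue
   first) and start it on the state machine.  Returns SI_SM_IDLE if
   nothing is queued.  Called with interrupts off and the SI lock held. */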
230 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
231 {
232         int              rv;
233         struct list_head *entry = NULL;
234 #ifdef DEBUG_TIMING
235         struct timeval t;
236 #endif
237
238         /* No need to save flags, we already have interrupts off and we
239            already hold the SMI lock. */
240         spin_lock(&(smi_info->msg_lock));
241
242         /* Pick the high priority queue first. */
243         if (! list_empty(&(smi_info->hp_xmit_msgs))) {
244                 entry = smi_info->hp_xmit_msgs.next;
245         } else if (! list_empty(&(smi_info->xmit_msgs))) {
246                 entry = smi_info->xmit_msgs.next;
247         }
248
249         if (!entry) {
250                 smi_info->curr_msg = NULL;
251                 rv = SI_SM_IDLE;
252         } else {
253                 int err;
254
255                 list_del(entry);
256                 smi_info->curr_msg = list_entry(entry,
257                                                 struct ipmi_smi_msg,
258                                                 link);
259 #ifdef DEBUG_TIMING
260                 do_gettimeofday(&t);
261                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
262 #endif
263                 err = smi_info->handlers->start_transaction(
264                         smi_info->si_sm,
265                         smi_info->curr_msg->data,
266                         smi_info->curr_msg->data_size);
267                 if (err) {
268                         return_hosed_msg(smi_info);
269                 }
270
271                 rv = SI_SM_CALL_WITHOUT_DELAY;
272         }
273         spin_unlock(&(smi_info->msg_lock));
274
275         return rv;
276 }
277
278 static void start_enable_irq(struct smi_info *smi_info)
279 {
280         unsigned char msg[2];
281
282         /* If we are enabling interrupts, we have to tell the
283            BMC to use them. */
284         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
285         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
286
287         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
288         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
289 }
290
291 static void start_clear_flags(struct smi_info *smi_info)
292 {
293         unsigned char msg[3];
294
295         /* Make sure the watchdog pre-timeout flag is not set at startup. */
296         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
297         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
298         msg[2] = WDT_PRE_TIMEOUT_INT;
299
300         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
301         smi_info->si_state = SI_CLEARING_FLAGS;
302 }
303
304 /* When we have a situation where we run out of memory and cannot
305    allocate messages, we just leave them in the BMC and run the system
306    polled until we can allocate some memory.  Once we have some
307    memory, we will re-enable the interrupt. */
308 static inline void disable_si_irq(struct smi_info *smi_info)
309 {
310         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
311                 disable_irq_nosync(smi_info->irq);
312                 smi_info->interrupt_disabled = 1;
313         }
314 }
315
316 static inline void enable_si_irq(struct smi_info *smi_info)
317 {
318         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
319                 enable_irq(smi_info->irq);
320                 smi_info->interrupt_disabled = 0;
321         }
322 }
323
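/* Act on the flags from the last GET_MSG_FLAGS response: watchdog
   pre-timeouts first, then waiting messages, then buffered events.
   Falls back to SI_NORMAL when nothing is left to do. */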
324 static void handle_flags(struct smi_info *smi_info)
325 {
326         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
327                 /* Watchdog pre-timeout */
328                 spin_lock(&smi_info->count_lock);
329                 smi_info->watchdog_pretimeouts++;
330                 spin_unlock(&smi_info->count_lock);
331
332                 start_clear_flags(smi_info);
333                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
334                 spin_unlock(&(smi_info->si_lock));
335                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
336                 spin_lock(&(smi_info->si_lock));
337         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
338                 /* Messages available. */
339                 smi_info->curr_msg = ipmi_alloc_smi_msg();
340                 if (!smi_info->curr_msg) {
341                         disable_si_irq(smi_info);
342                         smi_info->si_state = SI_NORMAL;
343                         return;
344                 }
345                 enable_si_irq(smi_info);
346
347                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
348                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
349                 smi_info->curr_msg->data_size = 2;
350
351                 smi_info->handlers->start_transaction(
352                         smi_info->si_sm,
353                         smi_info->curr_msg->data,
354                         smi_info->curr_msg->data_size);
355                 smi_info->si_state = SI_GETTING_MESSAGES;
356         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
357                 /* Events available. */
358                 smi_info->curr_msg = ipmi_alloc_smi_msg();
359                 if (!smi_info->curr_msg) {
360                         disable_si_irq(smi_info);
361                         smi_info->si_state = SI_NORMAL;
362                         return;
363                 }
364                 enable_si_irq(smi_info);
365
366                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
367                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
368                 smi_info->curr_msg->data_size = 2;
369
370                 smi_info->handlers->start_transaction(
371                         smi_info->si_sm,
372                         smi_info->curr_msg->data,
373                         smi_info->curr_msg->data_size);
374                 smi_info->si_state = SI_GETTING_EVENTS;
375         } else {
376                 smi_info->si_state = SI_NORMAL;
377         }
378 }
379
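/* A transaction has completed on the state machine; fetch its result
   and advance si_state according to what we were waiting for. */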
380 static void handle_transaction_done(struct smi_info *smi_info)
381 {
382         struct ipmi_smi_msg *msg;
383 #ifdef DEBUG_TIMING
384         struct timeval t;
385
386         do_gettimeofday(&t);
387         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
388 #endif
389         switch (smi_info->si_state) {
390         case SI_NORMAL:
391                 if (!smi_info->curr_msg)
392                         break;
393
394                 smi_info->curr_msg->rsp_size
395                         = smi_info->handlers->get_result(
396                                 smi_info->si_sm,
397                                 smi_info->curr_msg->rsp,
398                                 IPMI_MAX_MSG_LENGTH);
399
400                 /* Do this here because deliver_recv_msg() releases the
401                    lock, and a new message can be put in during the
402                    time the lock is released. */
403                 msg = smi_info->curr_msg;
404                 smi_info->curr_msg = NULL;
405                 deliver_recv_msg(smi_info, msg);
406                 break;
407
408         case SI_GETTING_FLAGS:
409         {
410                 unsigned char msg[4];
411                 unsigned int  len;
412
413                 /* We got the flags from the SMI, now handle them. */
414                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
415                 if (msg[2] != 0) {
416                         /* Error fetching flags, just give up for
417                            now. */
418                         smi_info->si_state = SI_NORMAL;
419                 } else if (len < 4) {
420                         /* Hmm, no flags.  That's technically illegal, but
421                            don't use uninitialized data. */
422                         smi_info->si_state = SI_NORMAL;
423                 } else {
424                         smi_info->msg_flags = msg[3];
425                         handle_flags(smi_info);
426                 }
427                 break;
428         }
429
430         case SI_CLEARING_FLAGS:
431         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
432         {
433                 unsigned char msg[3];
434
435                 /* We cleared the flags. */
436                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
437                 if (msg[2] != 0) {
438                         /* Error clearing flags */
439                         printk(KERN_WARNING
440                                "ipmi_si: Error clearing flags: %2.2x\n",
441                                msg[2]);
442                 }
443                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
444                         start_enable_irq(smi_info);
445                 else
446                         smi_info->si_state = SI_NORMAL;
447                 break;
448         }
449
450         case SI_GETTING_EVENTS:
451         {
452                 smi_info->curr_msg->rsp_size
453                         = smi_info->handlers->get_result(
454                                 smi_info->si_sm,
455                                 smi_info->curr_msg->rsp,
456                                 IPMI_MAX_MSG_LENGTH);
457
458                 /* Do this here because deliver_recv_msg() releases the
459                    lock, and a new message can be put in during the
460                    time the lock is released. */
461                 msg = smi_info->curr_msg;
462                 smi_info->curr_msg = NULL;
463                 if (msg->rsp[2] != 0) {
464                         /* Error getting event, probably done. */
465                         msg->done(msg);
466
467                         /* Take off the event flag. */
468                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
469                         handle_flags(smi_info);
470                 } else {
471                         spin_lock(&smi_info->count_lock);
472                         smi_info->events++;
473                         spin_unlock(&smi_info->count_lock);
474
475                         /* Do this before we deliver the message
476                            because delivering the message releases the
477                            lock and something else can mess with the
478                            state. */
479                         handle_flags(smi_info);
480
481                         deliver_recv_msg(smi_info, msg);
482                 }
483                 break;
484         }
485
486         case SI_GETTING_MESSAGES:
487         {
488                 smi_info->curr_msg->rsp_size
489                         = smi_info->handlers->get_result(
490                                 smi_info->si_sm,
491                                 smi_info->curr_msg->rsp,
492                                 IPMI_MAX_MSG_LENGTH);
493
494                 /* Do this here because deliver_recv_msg() releases the
495                    lock, and a new message can be put in during the
496                    time the lock is released. */
497                 msg = smi_info->curr_msg;
498                 smi_info->curr_msg = NULL;
499                 if (msg->rsp[2] != 0) {
500                         /* Error getting message, probably done. */
501                         msg->done(msg);
502
503                         /* Take off the msg flag. */
504                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
505                         handle_flags(smi_info);
506                 } else {
507                         spin_lock(&smi_info->count_lock);
508                         smi_info->incoming_messages++;
509                         spin_unlock(&smi_info->count_lock);
510
511                         /* Do this before we deliver the message
512                            because delivering the message releases the
513                            lock and something else can mess with the
514                            state. */
515                         handle_flags(smi_info);
516
517                         deliver_recv_msg(smi_info, msg);
518                 }
519                 break;
520         }
521
522         case SI_ENABLE_INTERRUPTS1:
523         {
524                 unsigned char msg[4];
525
526                 /* We got the global enables; now set the message queue interrupt bit. */
527                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
528                 if (msg[2] != 0) {
529                         printk(KERN_WARNING
530                                "ipmi_si: Could not enable interrupts"
531                                ", failed get, using polled mode.\n");
532                         smi_info->si_state = SI_NORMAL;
533                 } else {
534                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
535                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
536                         msg[2] = msg[3] | 1; /* enable msg queue int */
537                         smi_info->handlers->start_transaction(
538                                 smi_info->si_sm, msg, 3);
539                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
540                 }
541                 break;
542         }
543
544         case SI_ENABLE_INTERRUPTS2:
545         {
546                 unsigned char msg[4];
547
548                 /* Check the result of setting the global enables. */
549                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
550                 if (msg[2] != 0) {
551                         printk(KERN_WARNING
552                                "ipmi_si: Could not enable interrupts"
553                                ", failed set, using polled mode.\n");
554                 }
555                 smi_info->si_state = SI_NORMAL;
556                 break;
557         }
558         }
559 }
560
561 /* Called on timeouts and events.  Timeouts should pass the elapsed
562    time, interrupts should pass in zero. */
563 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
564                                            int time)
565 {
566         enum si_sm_result si_sm_result;
567
568  restart:
569         /* There used to be a loop here that waited a little while
570            (around 25us) before giving up.  That turned out to be
571            pointless, the minimum delays I was seeing were in the 300us
572            range, which is far too long to wait in an interrupt.  So
573            we just run until the state machine tells us something
574            happened or it needs a delay. */
575         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
576         time = 0;
577         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
578         {
579                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
580         }
581
582         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
583         {
584                 spin_lock(&smi_info->count_lock);
585                 smi_info->complete_transactions++;
586                 spin_unlock(&smi_info->count_lock);
587
588                 handle_transaction_done(smi_info);
589                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
590         }
591         else if (si_sm_result == SI_SM_HOSED)
592         {
593                 spin_lock(&smi_info->count_lock);
594                 smi_info->hosed_count++;
595                 spin_unlock(&smi_info->count_lock);
596
597                 /* Do this before return_hosed_msg(), because that
598                    releases the lock. */
599                 smi_info->si_state = SI_NORMAL;
600                 if (smi_info->curr_msg != NULL) {
601                         /* If we were handling a user message, format
602                            a response to send to the upper layer to
603                            tell it about the error. */
604                         return_hosed_msg(smi_info);
605                 }
606                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
607         }
608
609         /* We prefer handling attn over new messages. */
610         if (si_sm_result == SI_SM_ATTN)
611         {
612                 unsigned char msg[2];
613
614                 spin_lock(&smi_info->count_lock);
615                 smi_info->attentions++;
616                 spin_unlock(&smi_info->count_lock);
617
618                 /* Got an attn, send down a get message flags to see
619                    what's causing it.  It would be better to handle
620                    this in the upper layer, but due to the way
621                    interrupts work with the SMI, that's not really
622                    possible. */
623                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
624                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
625
626                 smi_info->handlers->start_transaction(
627                         smi_info->si_sm, msg, 2);
628                 smi_info->si_state = SI_GETTING_FLAGS;
629                 goto restart;
630         }
631
632         /* If we are currently idle, try to start the next message. */
633         if (si_sm_result == SI_SM_IDLE) {
634                 spin_lock(&smi_info->count_lock);
635                 smi_info->idles++;
636                 spin_unlock(&smi_info->count_lock);
637
638                 si_sm_result = start_next_msg(smi_info);
639                 if (si_sm_result != SI_SM_IDLE)
640                         goto restart;
641         }
642
643         if ((si_sm_result == SI_SM_IDLE)
644             && (atomic_read(&smi_info->req_events)))
645         {
646                 /* We are idle and the upper layer requested that I fetch
647                    events, so do so. */
648                 unsigned char msg[2];
649
650                 spin_lock(&smi_info->count_lock);
651                 smi_info->flag_fetches++;
652                 spin_unlock(&smi_info->count_lock);
653
654                 atomic_set(&smi_info->req_events, 0);
655                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
656                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
657
658                 smi_info->handlers->start_transaction(
659                         smi_info->si_sm, msg, 2);
660                 smi_info->si_state = SI_GETTING_FLAGS;
661                 goto restart;
662         }
663
664         return si_sm_result;
665 }
666
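/* Queue a message from the upper layer for transmission.  In
   run-to-completion mode the transaction is driven synchronously here;
   otherwise the message is queued (high priority or normal) and the
   state machine is kicked if it is currently idle. */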
667 static void sender(void                *send_info,
668                    struct ipmi_smi_msg *msg,
669                    int                 priority)
670 {
671         struct smi_info   *smi_info = send_info;
672         enum si_sm_result result;
673         unsigned long     flags;
674 #ifdef DEBUG_TIMING
675         struct timeval    t;
676 #endif
677
678         spin_lock_irqsave(&(smi_info->msg_lock), flags);
679 #ifdef DEBUG_TIMING
680         do_gettimeofday(&t);
681         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
682 #endif
683
684         if (smi_info->run_to_completion) {
685                 /* If we are running to completion, then throw it in
686                    the list and run transactions until everything is
687                    clear.  Priority doesn't matter here. */
688                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
689
690                 /* We have to release the msg lock and claim the smi
691                    lock in this case, because of race conditions. */
692                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
693
694                 spin_lock_irqsave(&(smi_info->si_lock), flags);
695                 result = smi_event_handler(smi_info, 0);
696                 while (result != SI_SM_IDLE) {
697                         udelay(SI_SHORT_TIMEOUT_USEC);
698                         result = smi_event_handler(smi_info,
699                                                    SI_SHORT_TIMEOUT_USEC);
700                 }
701                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
702                 return;
703         } else {
704                 if (priority > 0) {
705                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
706                 } else {
707                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
708                 }
709         }
710         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
711
712         spin_lock_irqsave(&(smi_info->si_lock), flags);
713         if ((smi_info->si_state == SI_NORMAL)
714             && (smi_info->curr_msg == NULL))
715         {
716                 start_next_msg(smi_info);
717                 si_restart_short_timer(smi_info);
718         }
719         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
720 }
721
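/* Switch run-to-completion mode on or off.  When turning it on, drain
   the state machine synchronously so nothing is left in progress. */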
722 static void set_run_to_completion(void *send_info, int i_run_to_completion)
723 {
724         struct smi_info   *smi_info = send_info;
725         enum si_sm_result result;
726         unsigned long     flags;
727
728         spin_lock_irqsave(&(smi_info->si_lock), flags);
729
730         smi_info->run_to_completion = i_run_to_completion;
731         if (i_run_to_completion) {
732                 result = smi_event_handler(smi_info, 0);
733                 while (result != SI_SM_IDLE) {
734                         udelay(SI_SHORT_TIMEOUT_USEC);
735                         result = smi_event_handler(smi_info,
736                                                    SI_SHORT_TIMEOUT_USEC);
737                 }
738         }
739
740         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
741 }
742
743 static void poll(void *send_info)
744 {
745         struct smi_info *smi_info = send_info;
746
747         smi_event_handler(smi_info, 0);
748 }
749
750 static void request_events(void *send_info)
751 {
752         struct smi_info *smi_info = send_info;
753
754         atomic_set(&smi_info->req_events, 1);
755 }
756
757 static int initialized = 0;
758
759 /* Must be called with interrupts off and with the si_lock held. */
760 static void si_restart_short_timer(struct smi_info *smi_info)
761 {
762 #if defined(CONFIG_HIGH_RES_TIMERS)
763         unsigned long flags;
764         unsigned long jiffies_now;
765
766         if (del_timer(&(smi_info->si_timer))) {
767                 /* If we don't delete the timer, then it will go off
768                    immediately, anyway.  So we only process if we
769                    actually delete the timer. */
770
771                 /* We already have irqsave on, so no need for it
772                    here. */
773                 read_lock(&xtime_lock);
774                 jiffies_now = jiffies;
775                 smi_info->si_timer.expires = jiffies_now;
776                 smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now);
777
778                 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
779
780                 add_timer(&(smi_info->si_timer));
781                 spin_lock_irqsave(&smi_info->count_lock, flags);
782                 smi_info->timeout_restarts++;
783                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
784         }
785 #endif
786 }
787
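/* Periodic timer function.  Drives the state machine with the elapsed
   time, then rearms the timer: the long 10ms timeout when idle or when
   running with interrupts, or a short timeout when the state machine
   asked to be called again with a small delay. */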
788 static void smi_timeout(unsigned long data)
789 {
790         struct smi_info   *smi_info = (struct smi_info *) data;
791         enum si_sm_result smi_result;
792         unsigned long     flags;
793         unsigned long     jiffies_now;
794         unsigned long     time_diff;
795 #ifdef DEBUG_TIMING
796         struct timeval    t;
797 #endif
798
799         if (smi_info->stop_operation) {
800                 smi_info->timer_stopped = 1;
801                 return;
802         }
803
804         spin_lock_irqsave(&(smi_info->si_lock), flags);
805 #ifdef DEBUG_TIMING
806         do_gettimeofday(&t);
807         printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
808 #endif
809         jiffies_now = jiffies;
810         time_diff = ((jiffies_now - smi_info->last_timeout_jiffies)
811                      * SI_USEC_PER_JIFFY);
812         smi_result = smi_event_handler(smi_info, time_diff);
813
814         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
815
816         smi_info->last_timeout_jiffies = jiffies_now;
817
818         if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
819                 /* Running with interrupts, only do long timeouts. */
820                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
821                 spin_lock_irqsave(&smi_info->count_lock, flags);
822                 smi_info->long_timeouts++;
823                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
824                 goto do_add_timer;
825         }
826
827         /* If the state machine asks for a short delay, then shorten
828            the timer timeout. */
829         if (smi_result == SI_SM_CALL_WITH_DELAY) {
830                 spin_lock_irqsave(&smi_info->count_lock, flags);
831                 smi_info->short_timeouts++;
832                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
833 #if defined(CONFIG_HIGH_RES_TIMERS)
834                 read_lock(&xtime_lock);
835                 smi_info->si_timer.expires = jiffies;
836                 smi_info->si_timer.sub_expires
837                         = get_arch_cycles(smi_info->si_timer.expires);
838                 read_unlock(&xtime_lock);
839                 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
840 #else
841                 smi_info->si_timer.expires = jiffies + 1;
842 #endif
843         } else {
844                 spin_lock_irqsave(&smi_info->count_lock, flags);
845                 smi_info->long_timeouts++;
846                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
847                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
848 #if defined(CONFIG_HIGH_RES_TIMERS)
849                 smi_info->si_timer.sub_expires = 0;
850 #endif
851         }
852
853  do_add_timer:
854         add_timer(&(smi_info->si_timer));
855 }
856
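/* Interrupt handler: count the interrupt and run the state machine
   under the SI lock. */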
857 static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
858 {
859         struct smi_info *smi_info = data;
860         unsigned long   flags;
861 #ifdef DEBUG_TIMING
862         struct timeval  t;
863 #endif
864
865         spin_lock_irqsave(&(smi_info->si_lock), flags);
866
867         spin_lock(&smi_info->count_lock);
868         smi_info->interrupts++;
869         spin_unlock(&smi_info->count_lock);
870
871         if (smi_info->stop_operation)
872                 goto out;
873
874 #ifdef DEBUG_TIMING
875         do_gettimeofday(&t);
876         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
877 #endif
878         smi_event_handler(smi_info, 0);
879  out:
880         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
881         return IRQ_HANDLED;
882 }
883
884 static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
885 {
886         struct smi_info *smi_info = data;
887         /* We need to clear the IRQ flag for the BT interface. */
888         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
889                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
890                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
891         return si_irq_handler(irq, data, regs);
892 }
893
894
895 static struct ipmi_smi_handlers handlers =
896 {
897         .owner                  = THIS_MODULE,
898         .sender                 = sender,
899         .request_events         = request_events,
900         .set_run_to_completion  = set_run_to_completion,
901         .poll                   = poll,
902 };
903
904 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
905    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
906
907 #define SI_MAX_PARMS 4
908 #define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
909 static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
910 { NULL, NULL, NULL, NULL };
911
912 #define DEVICE_NAME "ipmi_si"
913
914 #define DEFAULT_KCS_IO_PORT     0xca2
915 #define DEFAULT_SMIC_IO_PORT    0xca9
916 #define DEFAULT_BT_IO_PORT      0xe4
917 #define DEFAULT_REGSPACING      1
918
919 static int           si_trydefaults = 1;
920 static char          *si_type[SI_MAX_PARMS];
921 #define MAX_SI_TYPE_STR 30
922 static char          si_type_str[MAX_SI_TYPE_STR];
923 static unsigned long addrs[SI_MAX_PARMS];
924 static int num_addrs;
925 static unsigned int  ports[SI_MAX_PARMS];
926 static int num_ports;
927 static int           irqs[SI_MAX_PARMS];
928 static int num_irqs;
929 static int           regspacings[SI_MAX_PARMS];
930 static int num_regspacings = 0;
931 static int           regsizes[SI_MAX_PARMS];
932 static int num_regsizes = 0;
933 static int           regshifts[SI_MAX_PARMS];
934 static int num_regshifts = 0;
935 static int slave_addrs[SI_MAX_PARMS];
936 static int num_slave_addrs = 0;
937
938
939 module_param_named(trydefaults, si_trydefaults, bool, 0);
940 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
941                  " default scan of the KCS and SMIC interface at the standard"
942                  " address");
943 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
944 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
945                  " interface separated by commas.  The types are 'kcs',"
946                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
947                  " the first interface to kcs and the second to bt");
948 module_param_array(addrs, long, &num_addrs, 0);
949 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
950                  " addresses separated by commas.  Only use if an interface"
951                  " is in memory.  Otherwise, set it to zero or leave"
952                  " it blank.");
953 module_param_array(ports, int, &num_ports, 0);
954 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
955                  " addresses separated by commas.  Only use if an interface"
956                  " is a port.  Otherwise, set it to zero or leave"
957                  " it blank.");
958 module_param_array(irqs, int, &num_irqs, 0);
959 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
960                  " addresses separated by commas.  Only use if an interface"
961                  " has an interrupt.  Otherwise, set it to zero or leave"
962                  " it blank.");
963 module_param_array(regspacings, int, &num_regspacings, 0);
964 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
965                  " and each successive register used by the interface.  For"
966                  " instance, if the start address is 0xca2 and the spacing"
967                  " is 2, then the second address is at 0xca4.  Defaults"
968                  " to 1.");
969 module_param_array(regsizes, int, &num_regsizes, 0);
970 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
971                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
972                  " 16-bit, 32-bit, or 64-bit register.  Use this if you"
973                  " the 8-bit IPMI register has to be read from a larger"
974                  " register.");
975 module_param_array(regshifts, int, &num_regshifts, 0);
976 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
977                  " IPMI register, in bits.  For instance, if the data"
978                  " is read from a 32-bit word and the IPMI data is in"
979                  " bit 8-15, then the shift would be 8");
980 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
981 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
982                  " the controller.  Normally this is 0x20, but can be"
983                  " overridden by this parm.  This is an array indexed"
984                  " by interface number.");
985
986
987 #define IPMI_MEM_ADDR_SPACE 1
988 #define IPMI_IO_ADDR_SPACE  2
989
990 #if defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_X86) || defined(CONFIG_PCI)
991 static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
992 {
993         int i;
994
995         for (i = 0; i < SI_MAX_PARMS; ++i) {
996                 /* Don't check our address. */
997                 if (i == intf)
998                         continue;
999                 if (si_type[i] != NULL) {
1000                         if ((addr_space == IPMI_MEM_ADDR_SPACE &&
1001                              base_addr == addrs[i]) ||
1002                             (addr_space == IPMI_IO_ADDR_SPACE &&
1003                              base_addr == ports[i]))
1004                                 return 0;
1005                 }
1006                 else
1007                         break;
1008         }
1009
1010         return 1;
1011 }
1012 #endif
1013
1014 static int std_irq_setup(struct smi_info *info)
1015 {
1016         int rv;
1017
1018         if (!info->irq)
1019                 return 0;
1020
1021         if (info->si_type == SI_BT) {
1022                 rv = request_irq(info->irq,
1023                                  si_bt_irq_handler,
1024                                  SA_INTERRUPT,
1025                                  DEVICE_NAME,
1026                                  info);
1027                 if (!rv)
1028                         /* Enable the interrupt in the BT interface. */
1029                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1030                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1031         } else
1032                 rv = request_irq(info->irq,
1033                                  si_irq_handler,
1034                                  SA_INTERRUPT,
1035                                  DEVICE_NAME,
1036                                  info);
1037         if (rv) {
1038                 printk(KERN_WARNING
1039                        "ipmi_si: %s unable to claim interrupt %d,"
1040                        " running polled\n",
1041                        DEVICE_NAME, info->irq);
1042                 info->irq = 0;
1043         } else {
1044                 printk("  Using irq %d\n", info->irq);
1045         }
1046
1047         return rv;
1048 }
1049
1050 static void std_irq_cleanup(struct smi_info *info)
1051 {
1052         if (!info->irq)
1053                 return;
1054
1055         if (info->si_type == SI_BT)
1056                 /* Disable the interrupt in the BT interface. */
1057                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1058         free_irq(info->irq, info);
1059 }
1060
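/* Register access routines for port I/O interfaces.  io->info points at
   the base I/O port; regspacing spreads the registers out, and regsize
   and regshift describe where the 8-bit IPMI register sits within a
   wider port. */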
1061 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1062 {
1063         unsigned int *addr = io->info;
1064
1065         return inb((*addr)+(offset*io->regspacing));
1066 }
1067
1068 static void port_outb(struct si_sm_io *io, unsigned int offset,
1069                       unsigned char b)
1070 {
1071         unsigned int *addr = io->info;
1072
1073         outb(b, (*addr)+(offset * io->regspacing));
1074 }
1075
1076 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1077 {
1078         unsigned int *addr = io->info;
1079
1080         return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
1081 }
1082
1083 static void port_outw(struct si_sm_io *io, unsigned int offset,
1084                       unsigned char b)
1085 {
1086         unsigned int *addr = io->info;
1087
1088         outw(b << io->regshift, (*addr)+(offset * io->regspacing));
1089 }
1090
1091 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1092 {
1093         unsigned int *addr = io->info;
1094
1095         return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
1096 }
1097
1098 static void port_outl(struct si_sm_io *io, unsigned int offset,
1099                       unsigned char b)
1100 {
1101         unsigned int *addr = io->info;
1102
1103         outl(b << io->regshift, (*addr)+(offset * io->regspacing));
1104 }
1105
1106 static void port_cleanup(struct smi_info *info)
1107 {
1108         unsigned int *addr = info->io.info;
1109         int           mapsize;
1110
1111         if (addr && (*addr)) {
1112                 mapsize = ((info->io_size * info->io.regspacing)
1113                            - (info->io.regspacing - info->io.regsize));
1114
1115                 release_region (*addr, mapsize);
1116         }
1117         kfree(info);
1118 }
1119
1120 static int port_setup(struct smi_info *info)
1121 {
1122         unsigned int *addr = info->io.info;
1123         int           mapsize;
1124
1125         if (!addr || (!*addr))
1126                 return -ENODEV;
1127
1128         info->io_cleanup = port_cleanup;
1129
1130         /* Figure out the actual inb/inw/inl/etc routine to use based
1131            upon the register size. */
1132         switch (info->io.regsize) {
1133         case 1:
1134                 info->io.inputb = port_inb;
1135                 info->io.outputb = port_outb;
1136                 break;
1137         case 2:
1138                 info->io.inputb = port_inw;
1139                 info->io.outputb = port_outw;
1140                 break;
1141         case 4:
1142                 info->io.inputb = port_inl;
1143                 info->io.outputb = port_outl;
1144                 break;
1145         default:
1146                 printk("ipmi_si: Invalid register size: %d\n",
1147                        info->io.regsize);
1148                 return -EINVAL;
1149         }
1150
1151         /* Calculate the total amount of memory to claim.  This is an
1152          * unusual looking calculation, but it avoids claiming any
1153          * more memory than it has to.  It will claim everything
1154          * from the first address to the end of the last full
1155          * register. */
1156         mapsize = ((info->io_size * info->io.regspacing)
1157                    - (info->io.regspacing - info->io.regsize));
1158
1159         if (request_region(*addr, mapsize, DEVICE_NAME) == NULL)
1160                 return -EIO;
1161         return 0;
1162 }
1163
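/* Build an smi_info for an interface described by the ports= module
   parameter, filling in default register spacing/size and defaulting
   the interface type to KCS. */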
1164 static int try_init_port(int intf_num, struct smi_info **new_info)
1165 {
1166         struct smi_info *info;
1167
1168         if (!ports[intf_num])
1169                 return -ENODEV;
1170
1171         if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
1172                               ports[intf_num]))
1173                 return -ENODEV;
1174
1175         info = kmalloc(sizeof(*info), GFP_KERNEL);
1176         if (!info) {
1177                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
1178                 return -ENOMEM;
1179         }
1180         memset(info, 0, sizeof(*info));
1181
1182         info->io_setup = port_setup;
1183         info->io.info = &(ports[intf_num]);
1184         info->io.addr = NULL;
1185         info->io.regspacing = regspacings[intf_num];
1186         if (!info->io.regspacing)
1187                 info->io.regspacing = DEFAULT_REGSPACING;
1188         info->io.regsize = regsizes[intf_num];
1189         if (!info->io.regsize)
1190                 info->io.regsize = DEFAULT_REGSPACING;
1191         info->io.regshift = regshifts[intf_num];
1192         info->irq = 0;
1193         info->irq_setup = NULL;
1194         *new_info = info;
1195
1196         if (si_type[intf_num] == NULL)
1197                 si_type[intf_num] = "kcs";
1198
1199         printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
1200                si_type[intf_num], ports[intf_num]);
1201         return 0;
1202 }
1203
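/* Register access routines for memory-mapped interfaces, mirroring the
   port versions above but using readb()/writeb() and friends on the
   ioremap()ed address. */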
1204 static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
1205 {
1206         return readb((io->addr)+(offset * io->regspacing));
1207 }
1208
1209 static void mem_outb(struct si_sm_io *io, unsigned int offset,
1210                      unsigned char b)
1211 {
1212         writeb(b, (io->addr)+(offset * io->regspacing));
1213 }
1214
1215 static unsigned char mem_inw(struct si_sm_io *io, unsigned int offset)
1216 {
1217         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1218                 & 0xff;
1219 }
1220
1221 static void mem_outw(struct si_sm_io *io, unsigned int offset,
1222                      unsigned char b)
1223 {
1224         writew(b << io->regshift, (io->addr)+(offset * io->regspacing));
1225 }
1226
1227 static unsigned char mem_inl(struct si_sm_io *io, unsigned int offset)
1228 {
1229         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1230                 & 0xff;
1231 }
1232
1233 static void mem_outl(struct si_sm_io *io, unsigned int offset,
1234                      unsigned char b)
1235 {
1236         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1237 }
1238
1239 #ifdef readq
1240 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1241 {
1242         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1243                 & 0xff;
1244 }
1245
1246 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1247                      unsigned char b)
1248 {
1249         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1250 }
1251 #endif
1252
1253 static void mem_cleanup(struct smi_info *info)
1254 {
1255         unsigned long *addr = info->io.info;
1256         int           mapsize;
1257
1258         if (info->io.addr) {
1259                 iounmap(info->io.addr);
1260
1261                 mapsize = ((info->io_size * info->io.regspacing)
1262                            - (info->io.regspacing - info->io.regsize));
1263
1264                 release_mem_region(*addr, mapsize);
1265         }
1266         kfree(info);
1267 }
1268
1269 static int mem_setup(struct smi_info *info)
1270 {
1271         unsigned long *addr = info->io.info;
1272         int           mapsize;
1273
1274         if (!addr || (!*addr))
1275                 return -ENODEV;
1276
1277         info->io_cleanup = mem_cleanup;
1278
1279         /* Figure out the actual readb/readw/readl/etc routine to use based
1280            upon the register size. */
1281         switch (info->io.regsize) {
1282         case 1:
1283                 info->io.inputb = mem_inb;
1284                 info->io.outputb = mem_outb;
1285                 break;
1286         case 2:
1287                 info->io.inputb = mem_inw;
1288                 info->io.outputb = mem_outw;
1289                 break;
1290         case 4:
1291                 info->io.inputb = mem_inl;
1292                 info->io.outputb = mem_outl;
1293                 break;
1294 #ifdef readq
1295         case 8:
1296                 info->io.inputb = mem_inq;
1297                 info->io.outputb = mem_outq;
1298                 break;
1299 #endif
1300         default:
1301                 printk("ipmi_si: Invalid register size: %d\n",
1302                        info->io.regsize);
1303                 return -EINVAL;
1304         }
1305
1306         /* Calculate the total amount of memory to claim.  This is an
1307          * unusual looking calculation, but it avoids claiming any
1308          * more memory than it has to.  It will claim everything
1309          * from the first address to the end of the last full
1310          * register. */
1311         mapsize = ((info->io_size * info->io.regspacing)
1312                    - (info->io.regspacing - info->io.regsize));
1313
1314         if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL)
1315                 return -EIO;
1316
1317         info->io.addr = ioremap(*addr, mapsize);
1318         if (info->io.addr == NULL) {
1319                 release_mem_region(*addr, mapsize);
1320                 return -EIO;
1321         }
1322         return 0;
1323 }
1324
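/* Build an smi_info for a memory-mapped interface described by the
   addrs= module parameter, analogous to try_init_port() above. */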
1325 static int try_init_mem(int intf_num, struct smi_info **new_info)
1326 {
1327         struct smi_info *info;
1328
1329         if (!addrs[intf_num])
1330                 return -ENODEV;
1331
1332         if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
1333                               addrs[intf_num]))
1334                 return -ENODEV;
1335
1336         info = kmalloc(sizeof(*info), GFP_KERNEL);
1337         if (!info) {
1338                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
1339                 return -ENOMEM;
1340         }
1341         memset(info, 0, sizeof(*info));
1342
1343         info->io_setup = mem_setup;
1344         info->io.info = &addrs[intf_num];
1345         info->io.addr = NULL;
1346         info->io.regspacing = regspacings[intf_num];
1347         if (!info->io.regspacing)
1348                 info->io.regspacing = DEFAULT_REGSPACING;
1349         info->io.regsize = regsizes[intf_num];
1350         if (!info->io.regsize)
1351                 info->io.regsize = DEFAULT_REGSPACING;
1352         info->io.regshift = regshifts[intf_num];
1353         info->irq = 0;
1354         info->irq_setup = NULL;
1355         *new_info = info;
1356
1357         if (si_type[intf_num] == NULL)
1358                 si_type[intf_num] = "kcs";
1359
1360         printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
1361                si_type[intf_num], addrs[intf_num]);
1362         return 0;
1363 }
1364
1365
1366 #ifdef CONFIG_ACPI_INTERPRETER
1367
1368 #include <linux/acpi.h>
1369
1370 /* Once we get an ACPI failure, we don't try any more, because we go
1371    through the tables sequentially.  Once we don't find a table, there
1372    are no more. */
1373 static int acpi_failure = 0;
1374
1375 /* For GPE-type interrupts. */
1376 static u32 ipmi_acpi_gpe(void *context)
1377 {
1378         struct smi_info *smi_info = context;
1379         unsigned long   flags;
1380 #ifdef DEBUG_TIMING
1381         struct timeval t;
1382 #endif
1383
1384         spin_lock_irqsave(&(smi_info->si_lock), flags);
1385
1386         spin_lock(&smi_info->count_lock);
1387         smi_info->interrupts++;
1388         spin_unlock(&smi_info->count_lock);
1389
1390         if (smi_info->stop_operation)
1391                 goto out;
1392
1393 #ifdef DEBUG_TIMING
1394         do_gettimeofday(&t);
1395         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1396 #endif
1397         smi_event_handler(smi_info, 0);
1398  out:
1399         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1400
1401         return ACPI_INTERRUPT_HANDLED;
1402 }
1403
1404 static int acpi_gpe_irq_setup(struct smi_info *info)
1405 {
1406         acpi_status status;
1407
1408         if (!info->irq)
1409                 return 0;
1410
1411         /* FIXME - is level triggered right? */
1412         status = acpi_install_gpe_handler(NULL,
1413                                           info->irq,
1414                                           ACPI_GPE_LEVEL_TRIGGERED,
1415                                           &ipmi_acpi_gpe,
1416                                           info);
1417         if (status != AE_OK) {
1418                 printk(KERN_WARNING
1419                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1420                        " running polled\n",
1421                        DEVICE_NAME, info->irq);
1422                 info->irq = 0;
1423                 return -EINVAL;
1424         } else {
1425                 printk("  Using ACPI GPE %d\n", info->irq);
1426                 return 0;
1427         }
1428 }
1429
1430 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1431 {
1432         if (!info->irq)
1433                 return;
1434
1435         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1436 }
1437
1438 /*
1439  * Defined at
1440  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1441  */
1442 struct SPMITable {
1443         s8      Signature[4];
1444         u32     Length;
1445         u8      Revision;
1446         u8      Checksum;
1447         s8      OEMID[6];
1448         s8      OEMTableID[8];
1449         s8      OEMRevision[4];
1450         s8      CreatorID[4];
1451         s8      CreatorRevision[4];
1452         u8      InterfaceType;
1453         u8      IPMIlegacy;
1454         s16     SpecificationRevision;
1455
1456         /*
1457          * Bit 0 - SCI interrupt supported
1458          * Bit 1 - I/O APIC/SAPIC
1459          */
1460         u8      InterruptType;
1461
1462         /* If bit 0 of InterruptType is set, then this is the SCI
1463            interrupt in the GPEx_STS register. */
1464         u8      GPE;
1465
1466         s16     Reserved;
1467
1468         /* If bit 1 of InterruptType is set, then this is the I/O
1469            APIC/SAPIC interrupt. */
1470         u32     GlobalSystemInterrupt;
1471
1472         /* The actual register address. */
1473         struct acpi_generic_address addr;
1474
1475         u8      UID[4];
1476
1477         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1478 };
1479
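/*
 * Probe for an interface described by an ACPI SPMI table.  Instance
 * intf_num + 1 of the "SPMI" table is fetched with
 * acpi_get_firmware_table(); InterfaceType values 1, 2 and 3 select the
 * "kcs", "smic" and "bt" state machines, and the generic address in the
 * table decides between port and memory-mapped I/O.  As a purely
 * illustrative example, a table with InterfaceType = 1 and an 8-bit-wide
 * SYSTEM_IO address of 0xca2 would end up configured as:
 *
 *      si_type[intf_num]  = "kcs"
 *      info->io_setup     = port_setup, ports[intf_num] = 0xca2
 *      regspacing/regsize = 1, regshift = 0
 */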
1480 static int try_init_acpi(int intf_num, struct smi_info **new_info)
1481 {
1482         struct smi_info  *info;
1483         acpi_status      status;
1484         struct SPMITable *spmi;
1485         char             *io_type;
1486         u8               addr_space;
1487
1488         if (acpi_failure)
1489                 return -ENODEV;
1490
1491         status = acpi_get_firmware_table("SPMI", intf_num+1,
1492                                          ACPI_LOGICAL_ADDRESSING,
1493                                          (struct acpi_table_header **) &spmi);
1494         if (status != AE_OK) {
1495                 acpi_failure = 1;
1496                 return -ENODEV;
1497         }
1498
1499         if (spmi->IPMIlegacy != 1) {
1500                 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1501                 return -ENODEV;
1502         }
1503
1504         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1505                 addr_space = IPMI_MEM_ADDR_SPACE;
1506         else
1507                 addr_space = IPMI_IO_ADDR_SPACE;
1508         if (!is_new_interface(-1, addr_space, spmi->addr.address))
1509                 return -ENODEV;
1510
1511         if (!spmi->addr.register_bit_width) {
1512                 acpi_failure = 1;
1513                 return -ENODEV;
1514         }
1515
1516         /* Figure out the interface type. */
1517         switch (spmi->InterfaceType)
1518         {
1519         case 1: /* KCS */
1520                 si_type[intf_num] = "kcs";
1521                 break;
1522
1523         case 2: /* SMIC */
1524                 si_type[intf_num] = "smic";
1525                 break;
1526
1527         case 3: /* BT */
1528                 si_type[intf_num] = "bt";
1529                 break;
1530
1531         default:
1532                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1533                         spmi->InterfaceType);
1534                 return -EIO;
1535         }
1536
1537         info = kmalloc(sizeof(*info), GFP_KERNEL);
1538         if (!info) {
1539                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1540                 return -ENOMEM;
1541         }
1542         memset(info, 0, sizeof(*info));
1543
1544         if (spmi->InterruptType & 1) {
1545                 /* We've got a GPE interrupt. */
1546                 info->irq = spmi->GPE;
1547                 info->irq_setup = acpi_gpe_irq_setup;
1548                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1549         } else if (spmi->InterruptType & 2) {
1550                 /* We've got an APIC/SAPIC interrupt. */
1551                 info->irq = spmi->GlobalSystemInterrupt;
1552                 info->irq_setup = std_irq_setup;
1553                 info->irq_cleanup = std_irq_cleanup;
1554         } else {
1555                 /* Use the default interrupt setting. */
1556                 info->irq = 0;
1557                 info->irq_setup = NULL;
1558         }
1559
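        /*
         * Register geometry: register_bit_width / 8 gives the spacing (and
         * size) in bytes, e.g. a 32-bit width yields a 4-byte spacing, and
         * register_bit_offset supplies the shift.  Note that the zero-width
         * fallback below cannot actually be reached, since a zero
         * register_bit_width already returned -ENODEV above.
         */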
1560         if (spmi->addr.register_bit_width) {
1561                 /* A (hopefully) properly formed register bit width. */
1562                 regspacings[intf_num] = spmi->addr.register_bit_width / 8;
1563                 info->io.regspacing = spmi->addr.register_bit_width / 8;
1564         } else {
1565                 /* Some broken systems get this wrong and set the value
1566                  * to zero.  Assume it is the default spacing.  If that
1567                  * is wrong, too bad, the vendor should fix the tables. */
1568                 regspacings[intf_num] = DEFAULT_REGSPACING;
1569                 info->io.regspacing = DEFAULT_REGSPACING;
1570         }
1571         regsizes[intf_num] = regspacings[intf_num];
1572         info->io.regsize = regsizes[intf_num];
1573         regshifts[intf_num] = spmi->addr.register_bit_offset;
1574         info->io.regshift = regshifts[intf_num];
1575
1576         if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1577                 io_type = "memory";
1578                 info->io_setup = mem_setup;
1579                 addrs[intf_num] = spmi->addr.address;
1580                 info->io.info = &(addrs[intf_num]);
1581         } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1582                 io_type = "I/O";
1583                 info->io_setup = port_setup;
1584                 ports[intf_num] = spmi->addr.address;
1585                 info->io.info = &(ports[intf_num]);
1586         } else {
1587                 kfree(info);
1588                 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1589                 return -EIO;
1590         }
1591
1592         *new_info = info;
1593
1594         printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
1595                si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
1596         return 0;
1597 }
1598 #endif
1599
1600 #ifdef CONFIG_X86
1601 typedef struct dmi_ipmi_data
1602 {
1603         u8              type;
1604         u8              addr_space;
1605         unsigned long   base_addr;
1606         u8              irq;
1607         u8              offset;
1608         u8              slave_addr;
1609 } dmi_ipmi_data_t;
1610
1611 static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
1612 static int dmi_data_entries;
1613
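/*
 * decode_dmi() picks apart an SMBIOS "IPMI Device Information" record
 * (DMI type 38).  The offsets used below follow that record's layout:
 * byte 4 holds the interface type (KCS/SMIC/BT), byte 6 the BMC slave
 * address, byte 8 onward the base address (bit 0 distinguishes I/O from
 * memory space), byte 0x10 the base-address modifier (register spacing in
 * bits 7:6, address LSB in bit 4) and byte 0x11 the interrupt number.
 */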
1614 static int __init decode_dmi(struct dmi_header *dm, int intf_num)
1615 {
1616         u8 *data = (u8 *)dm;
1617         unsigned long   base_addr;
1618         u8              reg_spacing;
1619         u8              len = dm->length;
1620         dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
1621
1622         ipmi_data->type = data[4];
1623
1624         memcpy(&base_addr, data+8, sizeof(unsigned long));
1625         if (len >= 0x11) {
1626                 if (base_addr & 1) {
1627                         /* I/O */
1628                         base_addr &= 0xFFFE;
1629                         ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
1630                 }
1631                 else {
1632                         /* Memory */
1633                         ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
1634                 }
1635                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1636                    is odd. */
1637                 ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1638
1639                 ipmi_data->irq = data[0x11];
1640
1641                 /* The top two bits of byte 0x10 hold the register spacing. */
1642                 reg_spacing = (data[0x10] & 0xC0) >> 6;
1643                 switch(reg_spacing){
1644                 case 0x00: /* Byte boundaries */
1645                     ipmi_data->offset = 1;
1646                     break;
1647                 case 0x01: /* 32-bit boundaries */
1648                     ipmi_data->offset = 4;
1649                     break;
1650                 case 0x02: /* 16-byte boundaries */
1651                     ipmi_data->offset = 16;
1652                     break;
1653                 default:
1654                     /* Some other interface, just ignore it. */
1655                     return -EIO;
1656                 }
1657         } else {
1658                 /* Old DMI spec. */
1659                 /* Note that technically, the lower bit of the base
1660                  * address should be 1 if the address is I/O and 0 if
1661                  * the address is in memory.  So many systems get that
1662                  * wrong (and all that I have seen are I/O) so we just
1663                  * ignore that bit and assume I/O.  Systems that use
1664                  * memory should use the newer spec, anyway. */
1665                 ipmi_data->base_addr = base_addr & 0xfffe;
1666                 ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
1667                 ipmi_data->offset = 1;
1668         }
1669
1670         ipmi_data->slave_addr = data[6];
1671
1672         if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
1673                 dmi_data_entries++;
1674                 return 0;
1675         }
1676
1677         memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t));
1678
1679         return -1;
1680 }
1681
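/*
 * Walk the IPMI devices registered by the DMI scanning code.  Each call to
 * dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev) returns the next matching
 * device after 'dev' (a NULL name matches any), and dev->device_data points
 * at the raw DMI record that decode_dmi() parses into one dmi_data[] slot
 * per interface, up to SI_MAX_DRIVERS entries.
 */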
1682 static void __init dmi_find_bmc(void)
1683 {
1684         struct dmi_device *dev = NULL;
1685         int intf_num = 0;
1686
1687         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1688                 if (intf_num >= SI_MAX_DRIVERS)
1689                         break;
1690
1691                 decode_dmi((struct dmi_header *) dev->device_data, intf_num++);
1692         }
1693 }
1694
1695 static int try_init_smbios(int intf_num, struct smi_info **new_info)
1696 {
1697         struct smi_info   *info;
1698         dmi_ipmi_data_t   *ipmi_data = dmi_data+intf_num;
1699         char              *io_type;
1700
1701         if (intf_num >= dmi_data_entries)
1702                 return -ENODEV;
1703
1704         switch(ipmi_data->type) {
1705                 case 0x01: /* KCS */
1706                         si_type[intf_num] = "kcs";
1707                         break;
1708                 case 0x02: /* SMIC */
1709                         si_type[intf_num] = "smic";
1710                         break;
1711                 case 0x03: /* BT */
1712                         si_type[intf_num] = "bt";
1713                         break;
1714                 default:
1715                         return -EIO;
1716         }
1717
1718         info = kmalloc(sizeof(*info), GFP_KERNEL);
1719         if (!info) {
1720                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
1721                 return -ENOMEM;
1722         }
1723         memset(info, 0, sizeof(*info));
1724
1725         if (ipmi_data->addr_space == IPMI_MEM_ADDR_SPACE) {
1726                 io_type = "memory";
1727                 info->io_setup = mem_setup;
1728                 addrs[intf_num] = ipmi_data->base_addr;
1729                 info->io.info = &(addrs[intf_num]);
1730         } else if (ipmi_data->addr_space == IPMI_IO_ADDR_SPACE) {
1731                 io_type = "I/O";
1732                 info->io_setup = port_setup;
1733                 ports[intf_num] = ipmi_data->base_addr;
1734                 info->io.info = &(ports[intf_num]);
1735         } else {
1736                 kfree(info);
1737                 printk("ipmi_si: Unknown SMBIOS I/O Address type.\n");
1738                 return -EIO;
1739         }
1740
1741         regspacings[intf_num] = ipmi_data->offset;
1742         info->io.regspacing = regspacings[intf_num];
1743         if (!info->io.regspacing)
1744                 info->io.regspacing = DEFAULT_REGSPACING;
1745         info->io.regsize = DEFAULT_REGSPACING;
1746         info->io.regshift = regshifts[intf_num];
1747
1748         info->slave_addr = ipmi_data->slave_addr;
1749
1750         irqs[intf_num] = ipmi_data->irq;
1751
1752         *new_info = info;
1753
1754         printk("ipmi_si: Found SMBIOS-specified state machine at %s"
1755                " address 0x%lx, slave address 0x%x\n",
1756                io_type, (unsigned long)ipmi_data->base_addr,
1757                ipmi_data->slave_addr);
1758         return 0;
1759 }
1760 #endif /* CONFIG_X86 */
1761
1762 #ifdef CONFIG_PCI
1763
1764 #define PCI_ERMC_CLASSCODE  0x0C0700
1765 #define PCI_HP_VENDOR_ID    0x103C
1766 #define PCI_MMC_DEVICE_ID   0x121A
1767 #define PCI_MMC_ADDR_CW     0x10
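/* 0x0C0700 is the PCI class code for an IPMI SMIC system interface (base
   class 0x0C "serial bus controller", subclass 0x07 "IPMI", programming
   interface 0x00 for SMIC); the vendor/device pair above identifies HP's
   management controller, which is what this probe looks for. */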
1768
1769 /* Avoid more than one attempt to probe pci smic. */
1770 static int pci_smic_checked = 0;
1771
1772 static int find_pci_smic(int intf_num, struct smi_info **new_info)
1773 {
1774         struct smi_info  *info;
1775         int              error;
1776         struct pci_dev   *pci_dev = NULL;
1777         u16              base_addr;
1778         int              fe_rmc = 0;
1779
1780         if (pci_smic_checked)
1781                 return -ENODEV;
1782
1783         pci_smic_checked = 1;
1784
1785         if ((pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID,
1786                                        NULL)))
1787                 ;
1788         else if ((pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL)) &&
1789                  pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)
1790                 fe_rmc = 1;
1791         else
1792                 return -ENODEV;
1793
1794         error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
1795         if (error)
1796         {
1797                 pci_dev_put(pci_dev);
1798                 printk(KERN_ERR
1799                        "ipmi_si: pci_read_config_word() failed (%d).\n",
1800                        error);
1801                 return -ENODEV;
1802         }
1803
1804         /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
1805         if (!(base_addr & 0x0001))
1806         {
1807                 pci_dev_put(pci_dev);
1808                 printk(KERN_ERR
1809                        "ipmi_si: memory mapped I/O not supported for PCI"
1810                        " smic.\n");
1811                 return -ENODEV;
1812         }
1813
1814         base_addr &= 0xFFFE;
1815         if (!fe_rmc)
1816                 /* Data register starts at base address + 1 in eRMC */
1817                 ++base_addr;
1818
1819         if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
1820                 pci_dev_put(pci_dev);
1821                 return -ENODEV;
1822         }
1823
1824         info = kmalloc(sizeof(*info), GFP_KERNEL);
1825         if (!info) {
1826                 pci_dev_put(pci_dev);
1827                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
1828                 return -ENOMEM;
1829         }
1830         memset(info, 0, sizeof(*info));
1831
1832         info->io_setup = port_setup;
1833         ports[intf_num] = base_addr;
1834         info->io.info = &(ports[intf_num]);
1835         info->io.regspacing = regspacings[intf_num];
1836         if (!info->io.regspacing)
1837                 info->io.regspacing = DEFAULT_REGSPACING;
1838         info->io.regsize = DEFAULT_REGSPACING;
1839         info->io.regshift = regshifts[intf_num];
1840
1841         *new_info = info;
1842
1843         irqs[intf_num] = pci_dev->irq;
1844         si_type[intf_num] = "smic";
1845
1846         printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
1847                 (long unsigned int) base_addr);
1848
1849         pci_dev_put(pci_dev);
1850         return 0;
1851 }
1852 #endif /* CONFIG_PCI */
1853
1854 static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
1855 {
1856 #ifdef CONFIG_PCI
1857         if (find_pci_smic(intf_num, new_info)==0)
1858                 return 0;
1859 #endif
1860         /* Include other methods here. */
1861
1862         return -ENODEV;
1863 }
1864
1865
1866 static int try_get_dev_id(struct smi_info *smi_info)
1867 {
1868         unsigned char      msg[2];
1869         unsigned char      *resp;
1870         unsigned long      resp_len;
1871         enum si_sm_result smi_result;
1872         int               rv = 0;
1873
1874         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1875         if (!resp)
1876                 return -ENOMEM;
1877
1878         /* Do a Get Device ID command, since it comes back with some
1879            useful info. */
1880         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1881         msg[1] = IPMI_GET_DEVICE_ID_CMD;
1882         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1883
1884         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1885         for (;;)
1886         {
1887                 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1888                         set_current_state(TASK_UNINTERRUPTIBLE);
1889                         schedule_timeout(1);
1890                         smi_result = smi_info->handlers->event(
1891                                 smi_info->si_sm, 100);
1892                 }
1893                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1894                 {
1895                         smi_result = smi_info->handlers->event(
1896                                 smi_info->si_sm, 0);
1897                 }
1898                 else
1899                         break;
1900         }
1901         if (smi_result == SI_SM_HOSED) {
1902                 /* We couldn't get the state machine to run, so whatever's at
1903                    the port is probably not an IPMI SMI interface. */
1904                 rv = -ENODEV;
1905                 goto out;
1906         }
1907
1908         /* Otherwise, we got some data. */
1909         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1910                                                   resp, IPMI_MAX_MSG_LENGTH);
1911         if (resp_len < 8) {
1912                 /* That's odd, it should be longer; resp[4..7] are read below. */
1913                 rv = -EINVAL;
1914                 goto out;
1915         }
1916
1917         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1918                 /* That's odd, it shouldn't be able to fail. */
1919                 rv = -EINVAL;
1920                 goto out;
1921         }
1922
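        /* Response layout relied on below, per the IPMI Get Device ID
           command: resp[0] = netfn/LUN, resp[1] = command echo, resp[2] =
           completion code, resp[3] = device ID, resp[4] = device revision,
           resp[5] = firmware revision major, resp[6] = firmware revision
           minor, resp[7] = IPMI version in BCD (major in the low nibble,
           minor in the high nibble). */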
1923         /* Record info from the get device id, in case we need it. */
1924         smi_info->ipmi_si_dev_rev = resp[4] & 0xf;
1925         smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f;
1926         smi_info->ipmi_si_fw_rev_minor = resp[6];
1927         smi_info->ipmi_version_major = resp[7] & 0xf;
1928         smi_info->ipmi_version_minor = resp[7] >> 4;
1929
1930  out:
1931         kfree(resp);
1932         return rv;
1933 }
1934
1935 static int type_file_read_proc(char *page, char **start, off_t off,
1936                                int count, int *eof, void *data)
1937 {
1938         char            *out = (char *) page;
1939         struct smi_info *smi = data;
1940
1941         switch (smi->si_type) {
1942             case SI_KCS:
1943                 return sprintf(out, "kcs\n");
1944             case SI_SMIC:
1945                 return sprintf(out, "smic\n");
1946             case SI_BT:
1947                 return sprintf(out, "bt\n");
1948             default:
1949                 return 0;
1950         }
1951 }
1952
1953 static int stat_file_read_proc(char *page, char **start, off_t off,
1954                                int count, int *eof, void *data)
1955 {
1956         char            *out = (char *) page;
1957         struct smi_info *smi = data;
1958
1959         out += sprintf(out, "interrupts_enabled:    %d\n",
1960                        smi->irq && !smi->interrupt_disabled);
1961         out += sprintf(out, "short_timeouts:        %ld\n",
1962                        smi->short_timeouts);
1963         out += sprintf(out, "long_timeouts:         %ld\n",
1964                        smi->long_timeouts);
1965         out += sprintf(out, "timeout_restarts:      %ld\n",
1966                        smi->timeout_restarts);
1967         out += sprintf(out, "idles:                 %ld\n",
1968                        smi->idles);
1969         out += sprintf(out, "interrupts:            %ld\n",
1970                        smi->interrupts);
1971         out += sprintf(out, "attentions:            %ld\n",
1972                        smi->attentions);
1973         out += sprintf(out, "flag_fetches:          %ld\n",
1974                        smi->flag_fetches);
1975         out += sprintf(out, "hosed_count:           %ld\n",
1976                        smi->hosed_count);
1977         out += sprintf(out, "complete_transactions: %ld\n",
1978                        smi->complete_transactions);
1979         out += sprintf(out, "events:                %ld\n",
1980                        smi->events);
1981         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
1982                        smi->watchdog_pretimeouts);
1983         out += sprintf(out, "incoming_messages:     %ld\n",
1984                        smi->incoming_messages);
1985
1986         return (out - ((char *) page));
1987 }
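/*
 * As an illustration (counter values made up), reading the si_stats entry
 * that init_one_smi() registers below, typically via something like
 * /proc/ipmi/0/si_stats, yields:
 *
 *      interrupts_enabled:    1
 *      short_timeouts:        1234
 *      long_timeouts:         42
 *      timeout_restarts:      0
 *
 * and so on for the remaining counters printed above.
 */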
1988
1989 /* Returns 0 if initialized, or negative on an error. */
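/* Discovery order: explicitly configured memory and port parameters are
   tried first; only when those fail and si_trydefaults is set do we fall
   back to the firmware-described sources (ACPI SPMI, then SMBIOS/DMI on
   x86) and finally the PCI plug-and-play probe. */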
1990 static int init_one_smi(int intf_num, struct smi_info **smi)
1991 {
1992         int             rv;
1993         struct smi_info *new_smi;
1994
1995
1996         rv = try_init_mem(intf_num, &new_smi);
1997         if (rv)
1998                 rv = try_init_port(intf_num, &new_smi);
1999 #ifdef CONFIG_ACPI_INTERPRETER
2000         if ((rv) && (si_trydefaults)) {
2001                 rv = try_init_acpi(intf_num, &new_smi);
2002         }
2003 #endif
2004 #ifdef CONFIG_X86
2005         if ((rv) && (si_trydefaults)) {
2006                 rv = try_init_smbios(intf_num, &new_smi);
2007         }
2008 #endif
2009         if ((rv) && (si_trydefaults)) {
2010                 rv = try_init_plug_and_play(intf_num, &new_smi);
2011         }
2012
2013
2014         if (rv)
2015                 return rv;
2016
2017         /* So we know not to free it unless we have allocated one. */
2018         new_smi->intf = NULL;
2019         new_smi->si_sm = NULL;
2020         new_smi->handlers = NULL;
2021
2022         if (!new_smi->irq_setup) {
2023                 new_smi->irq = irqs[intf_num];
2024                 new_smi->irq_setup = std_irq_setup;
2025                 new_smi->irq_cleanup = std_irq_cleanup;
2026         }
2027
2028         /* Default to KCS if no type is specified. */
2029         if (si_type[intf_num] == NULL) {
2030                 if (si_trydefaults)
2031                         si_type[intf_num] = "kcs";
2032                 else {
2033                         rv = -EINVAL;
2034                         goto out_err;
2035                 }
2036         }
2037
2038         /* Set up the state machine to use. */
2039         if (strcmp(si_type[intf_num], "kcs") == 0) {
2040                 new_smi->handlers = &kcs_smi_handlers;
2041                 new_smi->si_type = SI_KCS;
2042         } else if (strcmp(si_type[intf_num], "smic") == 0) {
2043                 new_smi->handlers = &smic_smi_handlers;
2044                 new_smi->si_type = SI_SMIC;
2045         } else if (strcmp(si_type[intf_num], "bt") == 0) {
2046                 new_smi->handlers = &bt_smi_handlers;
2047                 new_smi->si_type = SI_BT;
2048         } else {
2049                 /* No support for anything else yet. */
2050                 rv = -EIO;
2051                 goto out_err;
2052         }
2053
2054         /* Allocate the state machine's data and initialize it. */
2055         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2056         if (!new_smi->si_sm) {
2057                 printk(" Could not allocate state machine memory\n");
2058                 rv = -ENOMEM;
2059                 goto out_err;
2060         }
2061         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2062                                                         &new_smi->io);
2063
2064         /* Now that we know the I/O size, we can set up the I/O. */
2065         rv = new_smi->io_setup(new_smi);
2066         if (rv) {
2067                 printk(" Could not set up I/O space\n");
2068                 goto out_err;
2069         }
2070
2071         spin_lock_init(&(new_smi->si_lock));
2072         spin_lock_init(&(new_smi->msg_lock));
2073         spin_lock_init(&(new_smi->count_lock));
2074
2075         /* Do low-level detection first. */
2076         if (new_smi->handlers->detect(new_smi->si_sm)) {
2077                 rv = -ENODEV;
2078                 goto out_err;
2079         }
2080
2081         /* Attempt a get device id command.  If it fails, we probably
2082            don't have a SMI here. */
2083         rv = try_get_dev_id(new_smi);
2084         if (rv)
2085                 goto out_err;
2086
2087         /* Try to claim any interrupts. */
2088         new_smi->irq_setup(new_smi);
2089
2090         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2091         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2092         new_smi->curr_msg = NULL;
2093         atomic_set(&new_smi->req_events, 0);
2094         new_smi->run_to_completion = 0;
2095
2096         new_smi->interrupt_disabled = 0;
2097         new_smi->timer_stopped = 0;
2098         new_smi->stop_operation = 0;
2099
2100         /* Start clearing the flags before we enable interrupts or the
2101            timer to avoid racing with the timer. */
2102         start_clear_flags(new_smi);
2103         /* IRQ is defined to be set when non-zero. */
2104         if (new_smi->irq)
2105                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2106
2107         /* The ipmi_register_smi() code does some operations to
2108            determine the channel information, so we must be ready to
2109            handle operations before it is called.  This means we have
2110            to stop the timer if we get an error after this point. */
2111         init_timer(&(new_smi->si_timer));
2112         new_smi->si_timer.data = (long) new_smi;
2113         new_smi->si_timer.function = smi_timeout;
2114         new_smi->last_timeout_jiffies = jiffies;
2115         new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
2116         add_timer(&(new_smi->si_timer));
2117
2118         rv = ipmi_register_smi(&handlers,
2119                                new_smi,
2120                                new_smi->ipmi_version_major,
2121                                new_smi->ipmi_version_minor,
2122                                new_smi->slave_addr,
2123                                &(new_smi->intf));
2124         if (rv) {
2125                 printk(KERN_ERR
2126                        "ipmi_si: Unable to register device: error %d\n",
2127                        rv);
2128                 goto out_err_stop_timer;
2129         }
2130
2131         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2132                                      type_file_read_proc, NULL,
2133                                      new_smi, THIS_MODULE);
2134         if (rv) {
2135                 printk(KERN_ERR
2136                        "ipmi_si: Unable to create proc entry: %d\n",
2137                        rv);
2138                 goto out_err_stop_timer;
2139         }
2140
2141         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2142                                      stat_file_read_proc, NULL,
2143                                      new_smi, THIS_MODULE);
2144         if (rv) {
2145                 printk(KERN_ERR
2146                        "ipmi_si: Unable to create proc entry: %d\n",
2147                        rv);
2148                 goto out_err_stop_timer;
2149         }
2150
2151         *smi = new_smi;
2152
2153         printk(" IPMI %s interface initialized\n", si_type[intf_num]);
2154
2155         return 0;
2156
2157  out_err_stop_timer:
2158         new_smi->stop_operation = 1;
2159
2160         /* Wait for the timer to stop.  This avoids problems with races
2161            when removing the timer here. */
2162         while (!new_smi->timer_stopped) {
2163                 set_current_state(TASK_UNINTERRUPTIBLE);
2164                 schedule_timeout(1);
2165         }
2166
2167  out_err:
2168         if (new_smi->intf)
2169                 ipmi_unregister_smi(new_smi->intf);
2170
2171         new_smi->irq_cleanup(new_smi);
2172
2173         /* Wait until we know that any interrupt handlers that might
2174            have been running before we freed the interrupt have
2175            finished. */
2176         synchronize_sched();
2177
2178         if (new_smi->si_sm) {
2179                 if (new_smi->handlers)
2180                         new_smi->handlers->cleanup(new_smi->si_sm);
2181                 kfree(new_smi->si_sm);
2182         }
2183         new_smi->io_cleanup(new_smi);
2184
2185         return rv;
2186 }
2187
2188 static __init int init_ipmi_si(void)
2189 {
2190         int  rv = 0;
2191         int  pos = 0;
2192         int  i;
2193         char *str;
2194
2195         if (initialized)
2196                 return 0;
2197         initialized = 1;
2198
2199         /* Parse out the si_type string into its components. */
2200         str = si_type_str;
2201         if (*str != '\0') {
2202                 for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) {
2203                         si_type[i] = str;
2204                         str = strchr(str, ',');
2205                         if (str) {
2206                                 *str = '\0';
2207                                 str++;
2208                         } else {
2209                                 break;
2210                         }
2211                 }
2212         }
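        /* For example, if si_type_str arrives from its module parameter as
           "kcs,bt", si_type[0] ends up pointing at "kcs" and si_type[1] at
           "bt"; the commas are simply overwritten with '\0' in place. */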
2213
2214         printk(KERN_INFO "IPMI System Interface driver version "
2215                IPMI_SI_VERSION);
2216         if (kcs_smi_handlers.version)
2217                 printk(", KCS version %s", kcs_smi_handlers.version);
2218         if (smic_smi_handlers.version)
2219                 printk(", SMIC version %s", smic_smi_handlers.version);
2220         if (bt_smi_handlers.version)
2221                 printk(", BT version %s", bt_smi_handlers.version);
2222         printk("\n");
2223
2224 #ifdef CONFIG_X86
2225         dmi_find_bmc();
2226 #endif
2227
2228         rv = init_one_smi(0, &(smi_infos[pos]));
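        /* If the probe above failed, no port was given and defaults are
           allowed, the block below walks the legacy I/O ports, trying KCS,
           then SMIC, then BT.  The DEFAULT_*_IO_PORT constants are defined
           earlier in this file (conventionally 0xca2, 0xca9 and 0xe4). */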
2229         if (rv && !ports[0] && si_trydefaults) {
2230                 /* If we are trying defaults and the initial port is
2231                    not set, then set it. */
2232                 si_type[0] = "kcs";
2233                 ports[0] = DEFAULT_KCS_IO_PORT;
2234                 rv = init_one_smi(0, &(smi_infos[pos]));
2235                 if (rv) {
2236                         /* No KCS - try SMIC */
2237                         si_type[0] = "smic";
2238                         ports[0] = DEFAULT_SMIC_IO_PORT;
2239                         rv = init_one_smi(0, &(smi_infos[pos]));
2240                 }
2241                 if (rv) {
2242                         /* No SMIC - try BT */
2243                         si_type[0] = "bt";
2244                         ports[0] = DEFAULT_BT_IO_PORT;
2245                         rv = init_one_smi(0, &(smi_infos[pos]));
2246                 }
2247         }
2248         if (rv == 0)
2249                 pos++;
2250
2251         for (i=1; i < SI_MAX_PARMS; i++) {
2252                 rv = init_one_smi(i, &(smi_infos[pos]));
2253                 if (rv == 0)
2254                         pos++;
2255         }
2256
2257         if (smi_infos[0] == NULL) {
2258                 printk("ipmi_si: Unable to find any System Interface(s)\n");
2259                 return -ENODEV;
2260         }
2261
2262         return 0;
2263 }
2264 module_init(init_ipmi_si);
2265
2266 static void __exit cleanup_one_si(struct smi_info *to_clean)
2267 {
2268         int           rv;
2269         unsigned long flags;
2270
2271         if (! to_clean)
2272                 return;
2273
2274         /* Tell the timer and interrupt handlers that we are shutting
2275            down. */
2276         spin_lock_irqsave(&(to_clean->si_lock), flags);
2277         spin_lock(&(to_clean->msg_lock));
2278
2279         to_clean->stop_operation = 1;
2280
2281         to_clean->irq_cleanup(to_clean);
2282
2283         spin_unlock(&(to_clean->msg_lock));
2284         spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2285
2286         /* Wait until we know that any interrupt handlers that might
2287            have been running before we freed the interrupt have
2288            finished. */
2289         synchronize_sched();
2290
2291         /* Wait for the timer to stop.  This avoids problems with races
2292            when removing the timer here. */
2293         while (!to_clean->timer_stopped) {
2294                 set_current_state(TASK_UNINTERRUPTIBLE);
2295                 schedule_timeout(1);
2296         }
2297
2298         /* Interrupts and timeouts are stopped; now make sure the
2299            interface is in a clean state. */
2300         while ((to_clean->curr_msg) || (to_clean->si_state != SI_NORMAL)) {
2301                 poll(to_clean);
2302                 set_current_state(TASK_UNINTERRUPTIBLE);
2303                 schedule_timeout(1);
2304         }
2305
2306         rv = ipmi_unregister_smi(to_clean->intf);
2307         if (rv) {
2308                 printk(KERN_ERR
2309                        "ipmi_si: Unable to unregister device: errno=%d\n",
2310                        rv);
2311         }
2312
2313         to_clean->handlers->cleanup(to_clean->si_sm);
2314
2315         kfree(to_clean->si_sm);
2316
2317         to_clean->io_cleanup(to_clean);
2318 }
2319
2320 static __exit void cleanup_ipmi_si(void)
2321 {
2322         int i;
2323
2324         if (!initialized)
2325                 return;
2326
2327         for (i=0; i<SI_MAX_DRIVERS; i++) {
2328                 cleanup_one_si(smi_infos[i]);
2329         }
2330 }
2331 module_exit(cleanup_ipmi_si);
2332
2333 MODULE_LICENSE("GPL");