1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
13  *
14  *  This program is free software; you can redistribute it and/or modify it
15  *  under the terms of the GNU General Public License as published by the
16  *  Free Software Foundation; either version 2 of the License, or (at your
17  *  option) any later version.
18  *
19  *
20  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  *  You should have received a copy of the GNU General Public License along
32  *  with this program; if not, write to the Free Software Foundation, Inc.,
33  *  675 Mass Ave, Cambridge, MA 02139, USA.
34  */
35
36 /*
37  * This file holds the "policy" for the interface to the SMI state
38  * machine.  It does the configuration, handles timers and interrupts,
39  * and drives the real SMI state machine.
40  */
41
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi_smi.h>
61 #include <asm/io.h>
62 #include "ipmi_si_sm.h"
63 #include <linux/init.h>
64 #include <linux/dmi.h>
65 #include <linux/string.h>
66 #include <linux/ctype.h>
67
68 #ifdef CONFIG_PPC_OF
69 #include <asm/of_device.h>
70 #include <asm/of_platform.h>
71 #endif
72
73 #define PFX "ipmi_si: "
74
75 /* Measure times between events in the driver. */
76 #undef DEBUG_TIMING
77
78 /* Call every 10 ms. */
79 #define SI_TIMEOUT_TIME_USEC    10000
80 #define SI_USEC_PER_JIFFY       (1000000/HZ)
81 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
82 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
83                                        short timeout */
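/*
 * For example, with HZ = 1000 (an illustrative config value) this gives
 * SI_USEC_PER_JIFFY = 1000 and SI_TIMEOUT_JIFFIES = 10, i.e. the normal
 * timer fires every 10 ms while the short timeout is a 250 us busy delay.
 */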
84
85 /* Bit for BMC global enables. */
86 #define IPMI_BMC_RCV_MSG_INTR     0x01
87 #define IPMI_BMC_EVT_MSG_INTR     0x02
88 #define IPMI_BMC_EVT_MSG_BUFF     0x04
89 #define IPMI_BMC_SYS_LOG          0x08
90
91 enum si_intf_state {
92         SI_NORMAL,
93         SI_GETTING_FLAGS,
94         SI_GETTING_EVENTS,
95         SI_CLEARING_FLAGS,
96         SI_CLEARING_FLAGS_THEN_SET_IRQ,
97         SI_GETTING_MESSAGES,
98         SI_ENABLE_INTERRUPTS1,
99         SI_ENABLE_INTERRUPTS2,
100         SI_DISABLE_INTERRUPTS1,
101         SI_DISABLE_INTERRUPTS2
102         /* FIXME - add watchdog stuff. */
103 };
104
105 /* Some BT-specific defines we need here. */
106 #define IPMI_BT_INTMASK_REG             2
107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
109
110 enum si_type {
111     SI_KCS, SI_SMIC, SI_BT
112 };
113 static char *si_to_str[] = { "kcs", "smic", "bt" };
114
115 #define DEVICE_NAME "ipmi_si"
116
117 static struct device_driver ipmi_driver =
118 {
119         .name = DEVICE_NAME,
120         .bus = &platform_bus_type
121 };
122
123 struct smi_info
124 {
125         int                    intf_num;
126         ipmi_smi_t             intf;
127         struct si_sm_data      *si_sm;
128         struct si_sm_handlers  *handlers;
129         enum si_type           si_type;
130         spinlock_t             si_lock;
131         spinlock_t             msg_lock;
132         struct list_head       xmit_msgs;
133         struct list_head       hp_xmit_msgs;
134         struct ipmi_smi_msg    *curr_msg;
135         enum si_intf_state     si_state;
136
137         /* Used to handle the various types of I/O that can occur with
138            IPMI */
139         struct si_sm_io io;
140         int (*io_setup)(struct smi_info *info);
141         void (*io_cleanup)(struct smi_info *info);
142         int (*irq_setup)(struct smi_info *info);
143         void (*irq_cleanup)(struct smi_info *info);
144         unsigned int io_size;
145         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
146         void (*addr_source_cleanup)(struct smi_info *info);
147         void *addr_source_data;
148
149         /* Per-OEM handler, called from handle_flags().
150            Returns 1 when handle_flags() needs to be re-run
151            or 0 indicating it set si_state itself.
152         */
153         int (*oem_data_avail_handler)(struct smi_info *smi_info);
154
155         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
156            is set to hold the flags until we are done handling everything
157            from the flags. */
158 #define RECEIVE_MSG_AVAIL       0x01
159 #define EVENT_MSG_BUFFER_FULL   0x02
160 #define WDT_PRE_TIMEOUT_INT     0x08
161 #define OEM0_DATA_AVAIL     0x20
162 #define OEM1_DATA_AVAIL     0x40
163 #define OEM2_DATA_AVAIL     0x80
164 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
165                              OEM1_DATA_AVAIL | \
166                              OEM2_DATA_AVAIL)
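        /* The three OEM bits map to bits 5-7 of the Get Message Flags
           response and are passed to oem_data_avail_handler below. */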
167         unsigned char       msg_flags;
168
169         /* If set to true, this will request events the next time the
170            state machine is idle. */
171         atomic_t            req_events;
172
173         /* If true, run the state machine to completion on every send
174            call.  Generally used after a panic to make sure stuff goes
175            out. */
176         int                 run_to_completion;
177
178         /* The I/O port of an SI interface. */
179         int                 port;
180
181         /* The space between start addresses of the two ports.  For
182            instance, if the first port is 0xca2 and the spacing is 4, then
183            the second port is 0xca6. */
184         unsigned int        spacing;
185
186         /* Zero if no IRQ. */
187         int                 irq;
188
189         /* The timer for this si. */
190         struct timer_list   si_timer;
191
192         /* The time (in jiffies) the last timeout occurred at. */
193         unsigned long       last_timeout_jiffies;
194
195         /* Used to gracefully stop the timer without race conditions. */
196         atomic_t            stop_operation;
197
198         /* The driver will disable interrupts when it gets into a
199            situation where it cannot handle messages due to lack of
200            memory.  Once that situation clears up, it will re-enable
201            interrupts. */
202         int interrupt_disabled;
203
204         /* From the get device id response... */
205         struct ipmi_device_id device_id;
206
207         /* Driver model stuff. */
208         struct device *dev;
209         struct platform_device *pdev;
210
211          /* True if we allocated the device, false if it came from
212           * someplace else (like PCI). */
213         int dev_registered;
214
215         /* Slave address, could be reported from DMI. */
216         unsigned char slave_addr;
217
218         /* Counters and things for the proc filesystem. */
219         spinlock_t count_lock;
220         unsigned long short_timeouts;
221         unsigned long long_timeouts;
222         unsigned long timeout_restarts;
223         unsigned long idles;
224         unsigned long interrupts;
225         unsigned long attentions;
226         unsigned long flag_fetches;
227         unsigned long hosed_count;
228         unsigned long complete_transactions;
229         unsigned long events;
230         unsigned long watchdog_pretimeouts;
231         unsigned long incoming_messages;
232
233         struct task_struct *thread;
234
235         struct list_head link;
236 };
237
238 #define SI_MAX_PARMS 4
239
240 static int force_kipmid[SI_MAX_PARMS];
241 static int num_force_kipmid;
242
243 static int unload_when_empty = 1;
244
245 static int try_smi_init(struct smi_info *smi);
246 static void cleanup_one_si(struct smi_info *to_clean);
247
248 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
249 static int register_xaction_notifier(struct notifier_block * nb)
250 {
251         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
252 }
253
254 static void deliver_recv_msg(struct smi_info *smi_info,
255                              struct ipmi_smi_msg *msg)
256 {
257         /* Deliver the message to the upper layer with the lock
258            released. */
259         spin_unlock(&(smi_info->si_lock));
260         ipmi_smi_msg_received(smi_info->intf, msg);
261         spin_lock(&(smi_info->si_lock));
262 }
263
264 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
265 {
266         struct ipmi_smi_msg *msg = smi_info->curr_msg;
267
268         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
269                 cCode = IPMI_ERR_UNSPECIFIED;
270         /* else use it as is */
271
272         /* Make it a response */
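        /* data[0] is (netfn << 2) | LUN; request netfns are even, so
           ORing in 4 bumps the netfn by one, giving the matching
           response netfn per the IPMI spec. */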
273         msg->rsp[0] = msg->data[0] | 4;
274         msg->rsp[1] = msg->data[1];
275         msg->rsp[2] = cCode;
276         msg->rsp_size = 3;
277
278         smi_info->curr_msg = NULL;
279         deliver_recv_msg(smi_info, msg);
280 }
281
282 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
283 {
284         int              rv;
285         struct list_head *entry = NULL;
286 #ifdef DEBUG_TIMING
287         struct timeval t;
288 #endif
289
290         /* No need to save flags, we already have interrupts off and we
291            already hold the SMI lock. */
292         spin_lock(&(smi_info->msg_lock));
293
294         /* Pick the high priority queue first. */
295         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
296                 entry = smi_info->hp_xmit_msgs.next;
297         } else if (!list_empty(&(smi_info->xmit_msgs))) {
298                 entry = smi_info->xmit_msgs.next;
299         }
300
301         if (!entry) {
302                 smi_info->curr_msg = NULL;
303                 rv = SI_SM_IDLE;
304         } else {
305                 int err;
306
307                 list_del(entry);
308                 smi_info->curr_msg = list_entry(entry,
309                                                 struct ipmi_smi_msg,
310                                                 link);
311 #ifdef DEBUG_TIMING
312                 do_gettimeofday(&t);
313                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
314 #endif
315                 err = atomic_notifier_call_chain(&xaction_notifier_list,
316                                 0, smi_info);
317                 if (err & NOTIFY_STOP_MASK) {
318                         rv = SI_SM_CALL_WITHOUT_DELAY;
319                         goto out;
320                 }
321                 err = smi_info->handlers->start_transaction(
322                         smi_info->si_sm,
323                         smi_info->curr_msg->data,
324                         smi_info->curr_msg->data_size);
325                 if (err) {
326                         return_hosed_msg(smi_info, err);
327                 }
328
329                 rv = SI_SM_CALL_WITHOUT_DELAY;
330         }
331         out:
332         spin_unlock(&(smi_info->msg_lock));
333
334         return rv;
335 }
336
337 static void start_enable_irq(struct smi_info *smi_info)
338 {
339         unsigned char msg[2];
340
341         /* If we are enabling interrupts, we have to tell the
342            BMC to use them. */
343         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
344         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
345
346         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
347         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
348 }
349
350 static void start_disable_irq(struct smi_info *smi_info)
351 {
352         unsigned char msg[2];
353
354         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
355         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
356
357         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
358         smi_info->si_state = SI_DISABLE_INTERRUPTS1;
359 }
360
361 static void start_clear_flags(struct smi_info *smi_info)
362 {
363         unsigned char msg[3];
364
365         /* Make sure the watchdog pre-timeout flag is not set at startup. */
366         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
367         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
368         msg[2] = WDT_PRE_TIMEOUT_INT;
369
370         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
371         smi_info->si_state = SI_CLEARING_FLAGS;
372 }
373
374 /* When we have a situation where we run out of memory and cannot
375    allocate messages, we just leave them in the BMC and run the system
376    polled until we can allocate some memory.  Once we have some
377    memory, we will re-enable the interrupt. */
378 static inline void disable_si_irq(struct smi_info *smi_info)
379 {
380         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
381                 start_disable_irq(smi_info);
382                 smi_info->interrupt_disabled = 1;
383         }
384 }
385
386 static inline void enable_si_irq(struct smi_info *smi_info)
387 {
388         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
389                 start_enable_irq(smi_info);
390                 smi_info->interrupt_disabled = 0;
391         }
392 }
393
394 static void handle_flags(struct smi_info *smi_info)
395 {
396  retry:
397         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
398                 /* Watchdog pre-timeout */
399                 spin_lock(&smi_info->count_lock);
400                 smi_info->watchdog_pretimeouts++;
401                 spin_unlock(&smi_info->count_lock);
402
403                 start_clear_flags(smi_info);
404                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
405                 spin_unlock(&(smi_info->si_lock));
406                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
407                 spin_lock(&(smi_info->si_lock));
408         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
409                 /* Messages available. */
410                 smi_info->curr_msg = ipmi_alloc_smi_msg();
411                 if (!smi_info->curr_msg) {
412                         disable_si_irq(smi_info);
413                         smi_info->si_state = SI_NORMAL;
414                         return;
415                 }
416                 enable_si_irq(smi_info);
417
418                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
419                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
420                 smi_info->curr_msg->data_size = 2;
421
422                 smi_info->handlers->start_transaction(
423                         smi_info->si_sm,
424                         smi_info->curr_msg->data,
425                         smi_info->curr_msg->data_size);
426                 smi_info->si_state = SI_GETTING_MESSAGES;
427         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
428                 /* Events available. */
429                 smi_info->curr_msg = ipmi_alloc_smi_msg();
430                 if (!smi_info->curr_msg) {
431                         disable_si_irq(smi_info);
432                         smi_info->si_state = SI_NORMAL;
433                         return;
434                 }
435                 enable_si_irq(smi_info);
436
437                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
438                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
439                 smi_info->curr_msg->data_size = 2;
440
441                 smi_info->handlers->start_transaction(
442                         smi_info->si_sm,
443                         smi_info->curr_msg->data,
444                         smi_info->curr_msg->data_size);
445                 smi_info->si_state = SI_GETTING_EVENTS;
446         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
447                    smi_info->oem_data_avail_handler) {
448                 if (smi_info->oem_data_avail_handler(smi_info))
449                         goto retry;
450         } else {
451                 smi_info->si_state = SI_NORMAL;
452         }
453 }
454
455 static void handle_transaction_done(struct smi_info *smi_info)
456 {
457         struct ipmi_smi_msg *msg;
458 #ifdef DEBUG_TIMING
459         struct timeval t;
460
461         do_gettimeofday(&t);
462         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
463 #endif
464         switch (smi_info->si_state) {
465         case SI_NORMAL:
466                 if (!smi_info->curr_msg)
467                         break;
468
469                 smi_info->curr_msg->rsp_size
470                         = smi_info->handlers->get_result(
471                                 smi_info->si_sm,
472                                 smi_info->curr_msg->rsp,
473                                 IPMI_MAX_MSG_LENGTH);
474
475                 /* Do this here because deliver_recv_msg() releases the
476                    lock, and a new message can be put in during the
477                    time the lock is released. */
478                 msg = smi_info->curr_msg;
479                 smi_info->curr_msg = NULL;
480                 deliver_recv_msg(smi_info, msg);
481                 break;
482
483         case SI_GETTING_FLAGS:
484         {
485                 unsigned char msg[4];
486                 unsigned int  len;
487
488                 /* We got the flags from the SMI, now handle them. */
489                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
490                 if (msg[2] != 0) {
491                         /* Error fetching flags, just give up for
492                            now. */
493                         smi_info->si_state = SI_NORMAL;
494                 } else if (len < 4) {
495                         /* Hmm, no flags.  That's technically illegal, but
496                            don't use uninitialized data. */
497                         smi_info->si_state = SI_NORMAL;
498                 } else {
499                         smi_info->msg_flags = msg[3];
500                         handle_flags(smi_info);
501                 }
502                 break;
503         }
504
505         case SI_CLEARING_FLAGS:
506         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
507         {
508                 unsigned char msg[3];
509
510                 /* We cleared the flags. */
511                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
512                 if (msg[2] != 0) {
513                         /* Error clearing flags */
514                         printk(KERN_WARNING
515                                "ipmi_si: Error clearing flags: %2.2x\n",
516                                msg[2]);
517                 }
518                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
519                         start_enable_irq(smi_info);
520                 else
521                         smi_info->si_state = SI_NORMAL;
522                 break;
523         }
524
525         case SI_GETTING_EVENTS:
526         {
527                 smi_info->curr_msg->rsp_size
528                         = smi_info->handlers->get_result(
529                                 smi_info->si_sm,
530                                 smi_info->curr_msg->rsp,
531                                 IPMI_MAX_MSG_LENGTH);
532
533                 /* Do this here because deliver_recv_msg() releases the
534                    lock, and a new message can be put in during the
535                    time the lock is released. */
536                 msg = smi_info->curr_msg;
537                 smi_info->curr_msg = NULL;
538                 if (msg->rsp[2] != 0) {
539                         /* Error getting event, probably done. */
540                         msg->done(msg);
541
542                         /* Take off the event flag. */
543                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
544                         handle_flags(smi_info);
545                 } else {
546                         spin_lock(&smi_info->count_lock);
547                         smi_info->events++;
548                         spin_unlock(&smi_info->count_lock);
549
550                         /* Do this before we deliver the message
551                            because delivering the message releases the
552                            lock and something else can mess with the
553                            state. */
554                         handle_flags(smi_info);
555
556                         deliver_recv_msg(smi_info, msg);
557                 }
558                 break;
559         }
560
561         case SI_GETTING_MESSAGES:
562         {
563                 smi_info->curr_msg->rsp_size
564                         = smi_info->handlers->get_result(
565                                 smi_info->si_sm,
566                                 smi_info->curr_msg->rsp,
567                                 IPMI_MAX_MSG_LENGTH);
568
569                 /* Do this here because deliver_recv_msg() releases the
570                    lock, and a new message can be put in during the
571                    time the lock is released. */
572                 msg = smi_info->curr_msg;
573                 smi_info->curr_msg = NULL;
574                 if (msg->rsp[2] != 0) {
575                         /* Error getting message, probably done. */
576                         msg->done(msg);
577
578                         /* Take off the msg flag. */
579                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
580                         handle_flags(smi_info);
581                 } else {
582                         spin_lock(&smi_info->count_lock);
583                         smi_info->incoming_messages++;
584                         spin_unlock(&smi_info->count_lock);
585
586                         /* Do this before we deliver the message
587                            because delivering the message releases the
588                            lock and something else can mess with the
589                            state. */
590                         handle_flags(smi_info);
591
592                         deliver_recv_msg(smi_info, msg);
593                 }
594                 break;
595         }
596
597         case SI_ENABLE_INTERRUPTS1:
598         {
599                 unsigned char msg[4];
600
601                 /* Got the global enables; now set the interrupt bits. */
602                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
603                 if (msg[2] != 0) {
604                         printk(KERN_WARNING
605                                "ipmi_si: Could not enable interrupts"
606                                ", failed get, using polled mode.\n");
607                         smi_info->si_state = SI_NORMAL;
608                 } else {
609                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
610                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
611                         msg[2] = (msg[3] |
612                                   IPMI_BMC_RCV_MSG_INTR |
613                                   IPMI_BMC_EVT_MSG_INTR);
614                         smi_info->handlers->start_transaction(
615                                 smi_info->si_sm, msg, 3);
616                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
617                 }
618                 break;
619         }
620
621         case SI_ENABLE_INTERRUPTS2:
622         {
623                 unsigned char msg[4];
624
625                 /* Response to setting the global enables. */
626                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
627                 if (msg[2] != 0) {
628                         printk(KERN_WARNING
629                                "ipmi_si: Could not enable interrupts"
630                                ", failed set, using polled mode.\n");
631                 }
632                 smi_info->si_state = SI_NORMAL;
633                 break;
634         }
635
636         case SI_DISABLE_INTERRUPTS1:
637         {
638                 unsigned char msg[4];
639
640                 /* Got the global enables; now clear the interrupt bits. */
641                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
642                 if (msg[2] != 0) {
643                         printk(KERN_WARNING
644                                "ipmi_si: Could not disable interrupts"
645                                ", failed get.\n");
646                         smi_info->si_state = SI_NORMAL;
647                 } else {
648                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
649                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
650                         msg[2] = (msg[3] &
651                                   ~(IPMI_BMC_RCV_MSG_INTR |
652                                     IPMI_BMC_EVT_MSG_INTR));
653                         smi_info->handlers->start_transaction(
654                                 smi_info->si_sm, msg, 3);
655                         smi_info->si_state = SI_DISABLE_INTERRUPTS2;
656                 }
657                 break;
658         }
659
660         case SI_DISABLE_INTERRUPTS2:
661         {
662                 unsigned char msg[4];
663
664                 /* Response to setting the global enables. */
665                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
666                 if (msg[2] != 0) {
667                         printk(KERN_WARNING
668                                "ipmi_si: Could not disable interrupts"
669                                ", failed set.\n");
670                 }
671                 smi_info->si_state = SI_NORMAL;
672                 break;
673         }
674         }
675 }
676
677 /* Called on timeouts and events.  Timeouts should pass the elapsed
678    time, interrupts should pass in zero. */
679 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
680                                            int time)
681 {
682         enum si_sm_result si_sm_result;
683
684  restart:
685         /* There used to be a loop here that waited a little while
686            (around 25us) before giving up.  That turned out to be
687            pointless, the minimum delays I was seeing were in the 300us
688            range, which is far too long to wait in an interrupt.  So
689            we just run until the state machine tells us something
690            happened or it needs a delay. */
691         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
692         time = 0;
693         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
694         {
695                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
696         }
697
698         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
699         {
700                 spin_lock(&smi_info->count_lock);
701                 smi_info->complete_transactions++;
702                 spin_unlock(&smi_info->count_lock);
703
704                 handle_transaction_done(smi_info);
705                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
706         }
707         else if (si_sm_result == SI_SM_HOSED)
708         {
709                 spin_lock(&smi_info->count_lock);
710                 smi_info->hosed_count++;
711                 spin_unlock(&smi_info->count_lock);
712
713                 /* Do this before return_hosed_msg(), because that
714                    releases the lock. */
715                 smi_info->si_state = SI_NORMAL;
716                 if (smi_info->curr_msg != NULL) {
717                         /* If we were handling a user message, format
718                            a response to send to the upper layer to
719                            tell it about the error. */
720                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
721                 }
722                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
723         }
724
725         /* We prefer handling attn over new messages. */
726         if (si_sm_result == SI_SM_ATTN)
727         {
728                 unsigned char msg[2];
729
730                 spin_lock(&smi_info->count_lock);
731                 smi_info->attentions++;
732                 spin_unlock(&smi_info->count_lock);
733
734                 /* Got an attn, send down a get message flags to see
735                    what's causing it.  It would be better to handle
736                    this in the upper layer, but due to the way
737                    interrupts work with the SMI, that's not really
738                    possible. */
739                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
740                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
741
742                 smi_info->handlers->start_transaction(
743                         smi_info->si_sm, msg, 2);
744                 smi_info->si_state = SI_GETTING_FLAGS;
745                 goto restart;
746         }
747
748         /* If we are currently idle, try to start the next message. */
749         if (si_sm_result == SI_SM_IDLE) {
750                 spin_lock(&smi_info->count_lock);
751                 smi_info->idles++;
752                 spin_unlock(&smi_info->count_lock);
753
754                 si_sm_result = start_next_msg(smi_info);
755                 if (si_sm_result != SI_SM_IDLE)
756                         goto restart;
757         }
758
759         if ((si_sm_result == SI_SM_IDLE)
760             && (atomic_read(&smi_info->req_events)))
761         {
762                 /* We are idle and the upper layer requested that I fetch
763                    events, so do so. */
764                 atomic_set(&smi_info->req_events, 0);
765
766                 smi_info->curr_msg = ipmi_alloc_smi_msg();
767                 if (!smi_info->curr_msg)
768                         goto out;
769
770                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
771                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
772                 smi_info->curr_msg->data_size = 2;
773
774                 smi_info->handlers->start_transaction(
775                         smi_info->si_sm,
776                         smi_info->curr_msg->data,
777                         smi_info->curr_msg->data_size);
778                 smi_info->si_state = SI_GETTING_EVENTS;
779                 goto restart;
780         }
781  out:
782         return si_sm_result;
783 }
784
785 static void sender(void                *send_info,
786                    struct ipmi_smi_msg *msg,
787                    int                 priority)
788 {
789         struct smi_info   *smi_info = send_info;
790         enum si_sm_result result;
791         unsigned long     flags;
792 #ifdef DEBUG_TIMING
793         struct timeval    t;
794 #endif
795
796         if (atomic_read(&smi_info->stop_operation)) {
797                 msg->rsp[0] = msg->data[0] | 4;
798                 msg->rsp[1] = msg->data[1];
799                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
800                 msg->rsp_size = 3;
801                 deliver_recv_msg(smi_info, msg);
802                 return;
803         }
804
805         spin_lock_irqsave(&(smi_info->msg_lock), flags);
806 #ifdef DEBUG_TIMING
807         do_gettimeofday(&t);
808         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
809 #endif
810
811         if (smi_info->run_to_completion) {
812                 /* If we are running to completion, then throw it in
813                    the list and run transactions until everything is
814                    clear.  Priority doesn't matter here. */
815                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
816
817                 /* We have to release the msg lock and claim the smi
818                    lock in this case, because of race conditions. */
819                 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
820
821                 spin_lock_irqsave(&(smi_info->si_lock), flags);
822                 result = smi_event_handler(smi_info, 0);
823                 while (result != SI_SM_IDLE) {
824                         udelay(SI_SHORT_TIMEOUT_USEC);
825                         result = smi_event_handler(smi_info,
826                                                    SI_SHORT_TIMEOUT_USEC);
827                 }
828                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
829                 return;
830         } else {
831                 if (priority > 0) {
832                         list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
833                 } else {
834                         list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
835                 }
836         }
837         spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
838
839         spin_lock_irqsave(&(smi_info->si_lock), flags);
840         if ((smi_info->si_state == SI_NORMAL)
841             && (smi_info->curr_msg == NULL))
842         {
843                 start_next_msg(smi_info);
844         }
845         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
846 }
847
848 static void set_run_to_completion(void *send_info, int i_run_to_completion)
849 {
850         struct smi_info   *smi_info = send_info;
851         enum si_sm_result result;
852         unsigned long     flags;
853
854         spin_lock_irqsave(&(smi_info->si_lock), flags);
855
856         smi_info->run_to_completion = i_run_to_completion;
857         if (i_run_to_completion) {
858                 result = smi_event_handler(smi_info, 0);
859                 while (result != SI_SM_IDLE) {
860                         udelay(SI_SHORT_TIMEOUT_USEC);
861                         result = smi_event_handler(smi_info,
862                                                    SI_SHORT_TIMEOUT_USEC);
863                 }
864         }
865
866         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
867 }
868
869 static int ipmi_thread(void *data)
870 {
871         struct smi_info *smi_info = data;
872         unsigned long flags;
873         enum si_sm_result smi_result;
874
875         set_user_nice(current, 19);
876         while (!kthread_should_stop()) {
877                 spin_lock_irqsave(&(smi_info->si_lock), flags);
878                 smi_result = smi_event_handler(smi_info, 0);
879                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
880                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
881                         /* do nothing */
882                 }
883                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
884                         schedule();
885                 else
886                         schedule_timeout_interruptible(1);
887         }
888         return 0;
889 }
890
891
892 static void poll(void *send_info)
893 {
894         struct smi_info *smi_info = send_info;
895
896         /*
897          * Make sure there is some delay in the poll loop so we can
898          * drive time forward and timeout things.
899          */
900         udelay(10);
901         smi_event_handler(smi_info, 10);
902 }
903
904 static void request_events(void *send_info)
905 {
906         struct smi_info *smi_info = send_info;
907
908         if (atomic_read(&smi_info->stop_operation))
909                 return;
910
911         atomic_set(&smi_info->req_events, 1);
912 }
913
914 static int initialized;
915
916 static void smi_timeout(unsigned long data)
917 {
918         struct smi_info   *smi_info = (struct smi_info *) data;
919         enum si_sm_result smi_result;
920         unsigned long     flags;
921         unsigned long     jiffies_now;
922         long              time_diff;
923 #ifdef DEBUG_TIMING
924         struct timeval    t;
925 #endif
926
927         spin_lock_irqsave(&(smi_info->si_lock), flags);
928 #ifdef DEBUG_TIMING
929         do_gettimeofday(&t);
930         printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
931 #endif
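        /* Convert the jiffies elapsed since the last run into microseconds;
           smi_event_handler() uses that to advance the state machine's
           internal timeouts. */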
932         jiffies_now = jiffies;
933         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
934                      * SI_USEC_PER_JIFFY);
935         smi_result = smi_event_handler(smi_info, time_diff);
936
937         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
938
939         smi_info->last_timeout_jiffies = jiffies_now;
940
941         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
942                 /* Running with interrupts, only do long timeouts. */
943                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
944                 spin_lock_irqsave(&smi_info->count_lock, flags);
945                 smi_info->long_timeouts++;
946                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
947                 goto do_add_timer;
948         }
949
950         /* If the state machine asks for a short delay, then shorten
951            the timer timeout. */
952         if (smi_result == SI_SM_CALL_WITH_DELAY) {
953                 spin_lock_irqsave(&smi_info->count_lock, flags);
954                 smi_info->short_timeouts++;
955                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
956                 smi_info->si_timer.expires = jiffies + 1;
957         } else {
958                 spin_lock_irqsave(&smi_info->count_lock, flags);
959                 smi_info->long_timeouts++;
960                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
961                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
962         }
963
964  do_add_timer:
965         add_timer(&(smi_info->si_timer));
966 }
967
968 static irqreturn_t si_irq_handler(int irq, void *data)
969 {
970         struct smi_info *smi_info = data;
971         unsigned long   flags;
972 #ifdef DEBUG_TIMING
973         struct timeval  t;
974 #endif
975
976         spin_lock_irqsave(&(smi_info->si_lock), flags);
977
978         spin_lock(&smi_info->count_lock);
979         smi_info->interrupts++;
980         spin_unlock(&smi_info->count_lock);
981
982 #ifdef DEBUG_TIMING
983         do_gettimeofday(&t);
984         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
985 #endif
986         smi_event_handler(smi_info, 0);
987         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
988         return IRQ_HANDLED;
989 }
990
991 static irqreturn_t si_bt_irq_handler(int irq, void *data)
992 {
993         struct smi_info *smi_info = data;
994         /* We need to clear the IRQ flag for the BT interface. */
995         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
996                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
997                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
998         return si_irq_handler(irq, data);
999 }
1000
1001 static int smi_start_processing(void       *send_info,
1002                                 ipmi_smi_t intf)
1003 {
1004         struct smi_info *new_smi = send_info;
1005         int             enable = 0;
1006
1007         new_smi->intf = intf;
1008
1009         /* Set up the timer that drives the interface. */
1010         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1011         new_smi->last_timeout_jiffies = jiffies;
1012         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1013
1014         /*
1015          * Check if the user forcefully enabled the daemon.
1016          */
1017         if (new_smi->intf_num < num_force_kipmid)
1018                 enable = force_kipmid[new_smi->intf_num];
1019         /*
1020          * The BT interface is efficient enough to not need a thread,
1021          * and there is no need for a thread if we have interrupts.
1022          */
1023         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1024                 enable = 1;
1025
1026         if (enable) {
1027                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1028                                               "kipmi%d", new_smi->intf_num);
1029                 if (IS_ERR(new_smi->thread)) {
1030                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
1031                                " kernel thread due to error %ld, only using"
1032                                " timers to drive the interface\n",
1033                                PTR_ERR(new_smi->thread));
1034                         new_smi->thread = NULL;
1035                 }
1036         }
1037
1038         return 0;
1039 }
1040
1041 static void set_maintenance_mode(void *send_info, int enable)
1042 {
1043         struct smi_info   *smi_info = send_info;
1044
1045         if (!enable)
1046                 atomic_set(&smi_info->req_events, 0);
1047 }
1048
1049 static struct ipmi_smi_handlers handlers =
1050 {
1051         .owner                  = THIS_MODULE,
1052         .start_processing       = smi_start_processing,
1053         .sender                 = sender,
1054         .request_events         = request_events,
1055         .set_maintenance_mode   = set_maintenance_mode,
1056         .set_run_to_completion  = set_run_to_completion,
1057         .poll                   = poll,
1058 };
1059
1060 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1061    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
1062
1063 static LIST_HEAD(smi_infos);
1064 static DEFINE_MUTEX(smi_infos_lock);
1065 static int smi_num; /* Used to sequence the SMIs */
1066
1067 #define DEFAULT_REGSPACING      1
1068 #define DEFAULT_REGSIZE         1
1069
1070 static int           si_trydefaults = 1;
1071 static char          *si_type[SI_MAX_PARMS];
1072 #define MAX_SI_TYPE_STR 30
1073 static char          si_type_str[MAX_SI_TYPE_STR];
1074 static unsigned long addrs[SI_MAX_PARMS];
1075 static int num_addrs;
1076 static unsigned int  ports[SI_MAX_PARMS];
1077 static int num_ports;
1078 static int           irqs[SI_MAX_PARMS];
1079 static int num_irqs;
1080 static int           regspacings[SI_MAX_PARMS];
1081 static int num_regspacings;
1082 static int           regsizes[SI_MAX_PARMS];
1083 static int num_regsizes;
1084 static int           regshifts[SI_MAX_PARMS];
1085 static int num_regshifts;
1086 static int slave_addrs[SI_MAX_PARMS];
1087 static int num_slave_addrs;
1088
1089 #define IPMI_IO_ADDR_SPACE  0
1090 #define IPMI_MEM_ADDR_SPACE 1
1091 static char *addr_space_to_str[] = { "i/o", "mem" };
1092
1093 static int hotmod_handler(const char *val, struct kernel_param *kp);
1094
1095 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1096 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1097                  " Documentation/IPMI.txt in the kernel sources for the"
1098                  " gory details.");
1099
1100 module_param_named(trydefaults, si_trydefaults, bool, 0);
1101 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1102                  " default scan of the KCS and SMIC interface at the standard"
1103                  " address");
1104 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1105 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1106                  " interface separated by commas.  The types are 'kcs',"
1107                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1108                  " the first interface to kcs and the second to bt");
1109 module_param_array(addrs, long, &num_addrs, 0);
1110 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1111                  " addresses separated by commas.  Only use if an interface"
1112                  " is in memory.  Otherwise, set it to zero or leave"
1113                  " it blank.");
1114 module_param_array(ports, int, &num_ports, 0);
1115 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1116                  " addresses separated by commas.  Only use if an interface"
1117                  " is a port.  Otherwise, set it to zero or leave"
1118                  " it blank.");
1119 module_param_array(irqs, int, &num_irqs, 0);
1120 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1121                  " addresses separated by commas.  Only use if an interface"
1122                  " has an interrupt.  Otherwise, set it to zero or leave"
1123                  " it blank.");
1124 module_param_array(regspacings, int, &num_regspacings, 0);
1125 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1126                  " and each successive register used by the interface.  For"
1127                  " instance, if the start address is 0xca2 and the spacing"
1128                  " is 2, then the second address is at 0xca4.  Defaults"
1129                  " to 1.");
1130 module_param_array(regsizes, int, &num_regsizes, 0);
1131 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1132                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1133                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1134                  " the 8-bit IPMI register has to be read from a larger"
1135                  " register.");
1136 module_param_array(regshifts, int, &num_regshifts, 0);
1137 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1138                  " IPMI register, in bits.  For instance, if the data"
1139                  " is read from a 32-bit word and the IPMI data is in"
1140                  " bits 8-15, then the shift would be 8.");
1141 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1142 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1143                  " the controller.  Normally this is 0x20, but can be"
1144                  " overridden by this parm.  This is an array indexed"
1145                  " by interface number.");
1146 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1147 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1148                  " disabled (0).  Normally the IPMI driver auto-detects"
1149                  " this, but the value may be overridden by this parm.");
1150 module_param(unload_when_empty, int, 0);
1151 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1152                  " specified or found, default is 1.  Setting to 0"
1153                  " is useful for hot add of devices using hotmod.");
1154
1155
1156 static void std_irq_cleanup(struct smi_info *info)
1157 {
1158         if (info->si_type == SI_BT)
1159                 /* Disable the interrupt in the BT interface. */
1160                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1161         free_irq(info->irq, info);
1162 }
1163
1164 static int std_irq_setup(struct smi_info *info)
1165 {
1166         int rv;
1167
1168         if (!info->irq)
1169                 return 0;
1170
1171         if (info->si_type == SI_BT) {
1172                 rv = request_irq(info->irq,
1173                                  si_bt_irq_handler,
1174                                  IRQF_SHARED | IRQF_DISABLED,
1175                                  DEVICE_NAME,
1176                                  info);
1177                 if (!rv)
1178                         /* Enable the interrupt in the BT interface. */
1179                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1180                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1181         } else
1182                 rv = request_irq(info->irq,
1183                                  si_irq_handler,
1184                                  IRQF_SHARED | IRQF_DISABLED,
1185                                  DEVICE_NAME,
1186                                  info);
1187         if (rv) {
1188                 printk(KERN_WARNING
1189                        "ipmi_si: %s unable to claim interrupt %d,"
1190                        " running polled\n",
1191                        DEVICE_NAME, info->irq);
1192                 info->irq = 0;
1193         } else {
1194                 info->irq_cleanup = std_irq_cleanup;
1195                 printk("  Using irq %d\n", info->irq);
1196         }
1197
1198         return rv;
1199 }
1200
1201 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1202 {
1203         unsigned int addr = io->addr_data;
1204
1205         return inb(addr + (offset * io->regspacing));
1206 }
1207
1208 static void port_outb(struct si_sm_io *io, unsigned int offset,
1209                       unsigned char b)
1210 {
1211         unsigned int addr = io->addr_data;
1212
1213         outb(b, addr + (offset * io->regspacing));
1214 }
1215
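/*
 * The 16- and 32-bit port accessors below read or write the full-width
 * register and use regshift to pick out the single IPMI byte.  For
 * instance (illustrative values), regshift = 8 takes the byte from
 * bits 8-15 of the word.
 */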
1216 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1217 {
1218         unsigned int addr = io->addr_data;
1219
1220         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1221 }
1222
1223 static void port_outw(struct si_sm_io *io, unsigned int offset,
1224                       unsigned char b)
1225 {
1226         unsigned int addr = io->addr_data;
1227
1228         outw(b << io->regshift, addr + (offset * io->regspacing));
1229 }
1230
1231 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1232 {
1233         unsigned int addr = io->addr_data;
1234
1235         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1236 }
1237
1238 static void port_outl(struct si_sm_io *io, unsigned int offset,
1239                       unsigned char b)
1240 {
1241         unsigned int addr = io->addr_data;
1242
1243         outl(b << io->regshift, addr+(offset * io->regspacing));
1244 }
1245
1246 static void port_cleanup(struct smi_info *info)
1247 {
1248         unsigned int addr = info->io.addr_data;
1249         int          idx;
1250
1251         if (addr) {
1252                 for (idx = 0; idx < info->io_size; idx++) {
1253                         release_region(addr + idx * info->io.regspacing,
1254                                        info->io.regsize);
1255                 }
1256         }
1257 }
1258
1259 static int port_setup(struct smi_info *info)
1260 {
1261         unsigned int addr = info->io.addr_data;
1262         int          idx;
1263
1264         if (!addr)
1265                 return -ENODEV;
1266
1267         info->io_cleanup = port_cleanup;
1268
1269         /* Figure out the actual inb/inw/inl/etc routine to use based
1270            upon the register size. */
1271         switch (info->io.regsize) {
1272         case 1:
1273                 info->io.inputb = port_inb;
1274                 info->io.outputb = port_outb;
1275                 break;
1276         case 2:
1277                 info->io.inputb = port_inw;
1278                 info->io.outputb = port_outw;
1279                 break;
1280         case 4:
1281                 info->io.inputb = port_inl;
1282                 info->io.outputb = port_outl;
1283                 break;
1284         default:
1285                 printk("ipmi_si: Invalid register size: %d\n",
1286                        info->io.regsize);
1287                 return -EINVAL;
1288         }
1289
1290         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1291          * tables.  This causes problems when trying to register the
1292          * entire I/O region.  Therefore we must register each I/O
1293          * port separately.
1294          */
1295         for (idx = 0; idx < info->io_size; idx++) {
1296                 if (request_region(addr + idx * info->io.regspacing,
1297                                    info->io.regsize, DEVICE_NAME) == NULL) {
1298                         /* Undo allocations */
1299                         while (idx--) {
1300                                 release_region(addr + idx * info->io.regspacing,
1301                                                info->io.regsize);
1302                         }
1303                         return -EIO;
1304                 }
1305         }
1306         return 0;
1307 }
1308
1309 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1310 {
1311         return readb((io->addr)+(offset * io->regspacing));
1312 }
1313
1314 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1315                      unsigned char b)
1316 {
1317         writeb(b, (io->addr)+(offset * io->regspacing));
1318 }
1319
1320 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1321 {
1322         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1323                 & 0xff;
1324 }
1325
1326 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1327                      unsigned char b)
1328 {
1329         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1330 }
1331
1332 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1333 {
1334         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1335                 & 0xff;
1336 }
1337
1338 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1339                      unsigned char b)
1340 {
1341         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1342 }
1343
1344 #ifdef readq
1345 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1346 {
1347         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1348                 & 0xff;
1349 }
1350
1351 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1352                      unsigned char b)
1353 {
1354         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1355 }
1356 #endif
1357
1358 static void mem_cleanup(struct smi_info *info)
1359 {
1360         unsigned long addr = info->io.addr_data;
1361         int           mapsize;
1362
1363         if (info->io.addr) {
1364                 iounmap(info->io.addr);
1365
1366                 mapsize = ((info->io_size * info->io.regspacing)
1367                            - (info->io.regspacing - info->io.regsize));
1368
1369                 release_mem_region(addr, mapsize);
1370         }
1371 }
1372
1373 static int mem_setup(struct smi_info *info)
1374 {
1375         unsigned long addr = info->io.addr_data;
1376         int           mapsize;
1377
1378         if (!addr)
1379                 return -ENODEV;
1380
1381         info->io_cleanup = mem_cleanup;
1382
1383         /* Figure out the actual readb/readw/readl/etc routine to use based
1384            upon the register size. */
1385         switch (info->io.regsize) {
1386         case 1:
1387                 info->io.inputb = intf_mem_inb;
1388                 info->io.outputb = intf_mem_outb;
1389                 break;
1390         case 2:
1391                 info->io.inputb = intf_mem_inw;
1392                 info->io.outputb = intf_mem_outw;
1393                 break;
1394         case 4:
1395                 info->io.inputb = intf_mem_inl;
1396                 info->io.outputb = intf_mem_outl;
1397                 break;
1398 #ifdef readq
1399         case 8:
1400                 info->io.inputb = mem_inq;
1401                 info->io.outputb = mem_outq;
1402                 break;
1403 #endif
1404         default:
1405                 printk("ipmi_si: Invalid register size: %d\n",
1406                        info->io.regsize);
1407                 return -EINVAL;
1408         }
1409
1410         /* Calculate the total amount of memory to claim.  This is an
1411          * unusual looking calculation, but it avoids claiming any
1412          * more memory than it has to.  It will claim everything
1413          * from the first address to the end of the last full
1414          * register. */
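        /* For instance (illustrative values), three 1-byte registers
           spaced 4 bytes apart give mapsize = 3 * 4 - (4 - 1) = 9. */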
1415         mapsize = ((info->io_size * info->io.regspacing)
1416                    - (info->io.regspacing - info->io.regsize));
1417
1418         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1419                 return -EIO;
1420
1421         info->io.addr = ioremap(addr, mapsize);
1422         if (info->io.addr == NULL) {
1423                 release_mem_region(addr, mapsize);
1424                 return -EIO;
1425         }
1426         return 0;
1427 }
1428
1429 /*
1430  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1431  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1432  * Options are:
1433  *   rsp=<regspacing>
1434  *   rsi=<regsize>
1435  *   rsh=<regshift>
1436  *   irq=<irq>
1437  *   ipmb=<ipmb addr>
1438  */
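/*
 * For example (an illustrative invocation, assuming the usual
 * /sys/module/ipmi_si/parameters/hotmod path for this parameter), a KCS
 * interface at I/O port 0xca2 could be added at run time with:
 *   echo "add,kcs,i/o,0xca2,rsp=1,irq=0" > /sys/module/ipmi_si/parameters/hotmod
 * and removed again with:
 *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 */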
1439 enum hotmod_op { HM_ADD, HM_REMOVE };
1440 struct hotmod_vals {
1441         char *name;
1442         int  val;
1443 };
1444 static struct hotmod_vals hotmod_ops[] = {
1445         { "add",        HM_ADD },
1446         { "remove",     HM_REMOVE },
1447         { NULL }
1448 };
1449 static struct hotmod_vals hotmod_si[] = {
1450         { "kcs",        SI_KCS },
1451         { "smic",       SI_SMIC },
1452         { "bt",         SI_BT },
1453         { NULL }
1454 };
1455 static struct hotmod_vals hotmod_as[] = {
1456         { "mem",        IPMI_MEM_ADDR_SPACE },
1457         { "i/o",        IPMI_IO_ADDR_SPACE },
1458         { NULL }
1459 };
1460
1461 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1462 {
1463         char *s;
1464         int  i;
1465
1466         s = strchr(*curr, ',');
1467         if (!s) {
1468                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1469                 return -EINVAL;
1470         }
1471         *s = '\0';
1472         s++;
1473         for (i = 0; v[i].name; i++) {
1474                 if (strcmp(*curr, v[i].name) == 0) {
1475                         *val = v[i].val;
1476                         *curr = s;
1477                         return 0;
1478                 }
1479         }
1480
1481         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1482         return -EINVAL;
1483 }
1484
1485 static int check_hotmod_int_op(const char *curr, const char *option,
1486                                const char *name, int *val)
1487 {
1488         char *n;
1489
1490         if (strcmp(curr, name) == 0) {
1491                 if (!option) {
1492                         printk(KERN_WARNING PFX
1493                                "No option given for '%s'\n",
1494                                curr);
1495                         return -EINVAL;
1496                 }
1497                 *val = simple_strtoul(option, &n, 0);
1498                 if ((*n != '\0') || (*option == '\0')) {
1499                         printk(KERN_WARNING PFX
1500                                "Bad option given for '%s'\n",
1501                                curr);
1502                         return -EINVAL;
1503                 }
1504                 return 1;
1505         }
1506         return 0;
1507 }
1508
1509 static int hotmod_handler(const char *val, struct kernel_param *kp)
1510 {
1511         char *str = kstrdup(val, GFP_KERNEL);
1512         int  rv;
1513         char *next, *curr, *s, *n, *o;
1514         enum hotmod_op op;
1515         enum si_type si_type;
1516         int  addr_space;
1517         unsigned long addr;
1518         int regspacing;
1519         int regsize;
1520         int regshift;
1521         int irq;
1522         int ipmb;
1523         int ival;
1524         int len;
1525         struct smi_info *info;
1526
1527         if (!str)
1528                 return -ENOMEM;
1529
1530         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1531         len = strlen(str);
1532         ival = len - 1;
1533         while ((ival >= 0) && isspace(str[ival])) {
1534                 str[ival] = '\0';
1535                 ival--;
1536         }
1537
1538         for (curr = str; curr; curr = next) {
1539                 regspacing = 1;
1540                 regsize = 1;
1541                 regshift = 0;
1542                 irq = 0;
1543                 ipmb = 0x20;
1544
1545                 next = strchr(curr, ':');
1546                 if (next) {
1547                         *next = '\0';
1548                         next++;
1549                 }
1550
1551                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1552                 if (rv)
1553                         goto out;
1554                 op = ival;
1555
1556                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1557                 if (rv)
1558                         goto out;
1559                 si_type = ival;
1560
1561                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1562                 if (rv)
1563                         goto out;
1564
1565                 s = strchr(curr, ',');
1566                 if (s) {
1567                         *s = '\0';
1568                         s++;
1569                 }
1570                 addr = simple_strtoul(curr, &n, 0);
1571                 if ((*n != '\0') || (*curr == '\0')) {
1572                         printk(KERN_WARNING PFX "Invalid hotmod address '%s'\n", curr);
1573                         rv = -EINVAL;
1574                         goto out;
1575                 }
1576
1577                 while (s) {
1578                         curr = s;
1579                         s = strchr(curr, ',');
1580                         if (s) {
1581                                 *s = '\0';
1582                                 s++;
1583                         }
1584                         o = strchr(curr, '=');
1585                         if (o) {
1586                                 *o = '\0';
1587                                 o++;
1588                         }
1589                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1590                         if (rv < 0)
1591                                 goto out;
1592                         else if (rv)
1593                                 continue;
1594                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1595                         if (rv < 0)
1596                                 goto out;
1597                         else if (rv)
1598                                 continue;
1599                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1600                         if (rv < 0)
1601                                 goto out;
1602                         else if (rv)
1603                                 continue;
1604                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1605                         if (rv < 0)
1606                                 goto out;
1607                         else if (rv)
1608                                 continue;
1609                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1610                         if (rv < 0)
1611                                 goto out;
1612                         else if (rv)
1613                                 continue;
1614
1615                         rv = -EINVAL;
1616                         printk(KERN_WARNING PFX
1617                                "Invalid hotmod option '%s'\n",
1618                                curr);
1619                         goto out;
1620                 }
1621
1622                 if (op == HM_ADD) {
1623                         info = kzalloc(sizeof(*info), GFP_KERNEL);
1624                         if (!info) {
1625                                 rv = -ENOMEM;
1626                                 goto out;
1627                         }
1628
1629                         info->addr_source = "hotmod";
1630                         info->si_type = si_type;
1631                         info->io.addr_data = addr;
1632                         info->io.addr_type = addr_space;
1633                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1634                                 info->io_setup = mem_setup;
1635                         else
1636                                 info->io_setup = port_setup;
1637
1638                         info->io.addr = NULL;
1639                         info->io.regspacing = regspacing;
1640                         if (!info->io.regspacing)
1641                                 info->io.regspacing = DEFAULT_REGSPACING;
1642                         info->io.regsize = regsize;
1643                         if (!info->io.regsize)
1644                                 info->io.regsize = DEFAULT_REGSIZE;
1645                         info->io.regshift = regshift;
1646                         info->irq = irq;
1647                         if (info->irq)
1648                                 info->irq_setup = std_irq_setup;
1649                         info->slave_addr = ipmb;
1650
1651                         try_smi_init(info);
1652                 } else {
1653                         /* remove */
1654                         struct smi_info *e, *tmp_e;
1655
1656                         mutex_lock(&smi_infos_lock);
1657                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1658                                 if (e->io.addr_type != addr_space)
1659                                         continue;
1660                                 if (e->si_type != si_type)
1661                                         continue;
1662                                 if (e->io.addr_data == addr)
1663                                         cleanup_one_si(e);
1664                         }
1665                         mutex_unlock(&smi_infos_lock);
1666                 }
1667         }
1668         rv = len;
1669  out:
1670         kfree(str);
1671         return rv;
1672 }
1673
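/*
 * Pick up any interfaces that were hard-coded via module parameters
 * (the type/ports/addrs/regspacings/regsizes/regshifts/irqs arrays used
 * below).  As a rough illustration, assuming those parameter names, a
 * KCS interface at I/O port 0xca2 could be forced with something like:
 *   modprobe ipmi_si type=kcs ports=0xca2
 */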
1674 static __devinit void hardcode_find_bmc(void)
1675 {
1676         int             i;
1677         struct smi_info *info;
1678
1679         for (i = 0; i < SI_MAX_PARMS; i++) {
1680                 if (!ports[i] && !addrs[i])
1681                         continue;
1682
1683                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1684                 if (!info)
1685                         return;
1686
1687                 info->addr_source = "hardcoded";
1688
1689                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1690                         info->si_type = SI_KCS;
1691                 } else if (strcmp(si_type[i], "smic") == 0) {
1692                         info->si_type = SI_SMIC;
1693                 } else if (strcmp(si_type[i], "bt") == 0) {
1694                         info->si_type = SI_BT;
1695                 } else {
1696                         printk(KERN_WARNING
1697                                "ipmi_si: Interface type specified "
1698                                "for interface %d, was invalid: %s\n",
1699                                i, si_type[i]);
1700                         kfree(info);
1701                         continue;
1702                 }
1703
1704                 if (ports[i]) {
1705                         /* An I/O port */
1706                         info->io_setup = port_setup;
1707                         info->io.addr_data = ports[i];
1708                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1709                 } else if (addrs[i]) {
1710                         /* A memory port */
1711                         info->io_setup = mem_setup;
1712                         info->io.addr_data = addrs[i];
1713                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1714                 } else {
1715                         printk(KERN_WARNING
1716                                "ipmi_si: Interface type specified "
1717                                "for interface %d, "
1718                                "but port and address were not set or "
1719                                "set to zero.\n", i);
1720                         kfree(info);
1721                         continue;
1722                 }
1723
1724                 info->io.addr = NULL;
1725                 info->io.regspacing = regspacings[i];
1726                 if (!info->io.regspacing)
1727                         info->io.regspacing = DEFAULT_REGSPACING;
1728                 info->io.regsize = regsizes[i];
1729                 if (!info->io.regsize)
1730                         info->io.regsize = DEFAULT_REGSIZE;
1731                 info->io.regshift = regshifts[i];
1732                 info->irq = irqs[i];
1733                 if (info->irq)
1734                         info->irq_setup = std_irq_setup;
1735
1736                 try_smi_init(info);
1737         }
1738 }
1739
1740 #ifdef CONFIG_ACPI
1741
1742 #include <linux/acpi.h>
1743
1744 /* Once we get an ACPI failure, we don't try any more, because we go
1745    through the tables sequentially.  Once we don't find a table, there
1746    are no more. */
1747 static int acpi_failure;
1748
1749 /* For GPE-type interrupts. */
1750 static u32 ipmi_acpi_gpe(void *context)
1751 {
1752         struct smi_info *smi_info = context;
1753         unsigned long   flags;
1754 #ifdef DEBUG_TIMING
1755         struct timeval t;
1756 #endif
1757
1758         spin_lock_irqsave(&(smi_info->si_lock), flags);
1759
1760         spin_lock(&smi_info->count_lock);
1761         smi_info->interrupts++;
1762         spin_unlock(&smi_info->count_lock);
1763
1764 #ifdef DEBUG_TIMING
1765         do_gettimeofday(&t);
1766         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1767 #endif
1768         smi_event_handler(smi_info, 0);
1769         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1770
1771         return ACPI_INTERRUPT_HANDLED;
1772 }
1773
1774 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1775 {
1776         if (!info->irq)
1777                 return;
1778
1779         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1780 }
1781
1782 static int acpi_gpe_irq_setup(struct smi_info *info)
1783 {
1784         acpi_status status;
1785
1786         if (!info->irq)
1787                 return 0;
1788
1789         /* FIXME - is level triggered right? */
1790         status = acpi_install_gpe_handler(NULL,
1791                                           info->irq,
1792                                           ACPI_GPE_LEVEL_TRIGGERED,
1793                                           &ipmi_acpi_gpe,
1794                                           info);
1795         if (status != AE_OK) {
1796                 printk(KERN_WARNING
1797                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1798                        " running polled\n",
1799                        DEVICE_NAME, info->irq);
1800                 info->irq = 0;
1801                 return -EINVAL;
1802         } else {
1803                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1804                 printk("  Using ACPI GPE %d\n", info->irq);
1805                 return 0;
1806         }
1807 }
1808
1809 /*
1810  * Defined at
1811  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1812  */
1813 struct SPMITable {
1814         s8      Signature[4];
1815         u32     Length;
1816         u8      Revision;
1817         u8      Checksum;
1818         s8      OEMID[6];
1819         s8      OEMTableID[8];
1820         s8      OEMRevision[4];
1821         s8      CreatorID[4];
1822         s8      CreatorRevision[4];
1823         u8      InterfaceType;
1824         u8      IPMIlegacy;
1825         s16     SpecificationRevision;
1826
1827         /*
1828          * Bit 0 - SCI interrupt supported
1829          * Bit 1 - I/O APIC/SAPIC
1830          */
1831         u8      InterruptType;
1832
1833         /* If bit 0 of InterruptType is set, then this is the SCI
1834            interrupt in the GPEx_STS register. */
1835         u8      GPE;
1836
1837         s16     Reserved;
1838
1839         /* If bit 1 of InterruptType is set, then this is the I/O
1840            APIC/SAPIC interrupt. */
1841         u32     GlobalSystemInterrupt;
1842
1843         /* The actual register address. */
1844         struct acpi_generic_address addr;
1845
1846         u8      UID[4];
1847
1848         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1849 };
1850
1851 static __devinit int try_init_acpi(struct SPMITable *spmi)
1852 {
1853         struct smi_info  *info;
1854         u8               addr_space;
1855
1856         if (spmi->IPMIlegacy != 1) {
1857                 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1858                 return -ENODEV;
1859         }
1860
1861         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1862                 addr_space = IPMI_MEM_ADDR_SPACE;
1863         else
1864                 addr_space = IPMI_IO_ADDR_SPACE;
1865
1866         info = kzalloc(sizeof(*info), GFP_KERNEL);
1867         if (!info) {
1868                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1869                 return -ENOMEM;
1870         }
1871
1872         info->addr_source = "ACPI";
1873
1874         /* Figure out the interface type. */
1875         switch (spmi->InterfaceType)
1876         {
1877         case 1: /* KCS */
1878                 info->si_type = SI_KCS;
1879                 break;
1880         case 2: /* SMIC */
1881                 info->si_type = SI_SMIC;
1882                 break;
1883         case 3: /* BT */
1884                 info->si_type = SI_BT;
1885                 break;
1886         default:
1887                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1888                         spmi->InterfaceType);
1889                 kfree(info);
1890                 return -EIO;
1891         }
1892
1893         if (spmi->InterruptType & 1) {
1894                 /* We've got a GPE interrupt. */
1895                 info->irq = spmi->GPE;
1896                 info->irq_setup = acpi_gpe_irq_setup;
1897         } else if (spmi->InterruptType & 2) {
1898                 /* We've got an APIC/SAPIC interrupt. */
1899                 info->irq = spmi->GlobalSystemInterrupt;
1900                 info->irq_setup = std_irq_setup;
1901         } else {
1902                 /* Use the default interrupt setting. */
1903                 info->irq = 0;
1904                 info->irq_setup = NULL;
1905         }
1906
1907         if (spmi->addr.bit_width) {
1908                 /* A (hopefully) properly formed register bit width. */
1909                 info->io.regspacing = spmi->addr.bit_width / 8;
1910         } else {
1911                 info->io.regspacing = DEFAULT_REGSPACING;
1912         }
1913         info->io.regsize = info->io.regspacing;
1914         info->io.regshift = spmi->addr.bit_offset;
1915
1916         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1917                 info->io_setup = mem_setup;
1918                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1919         } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1920                 info->io_setup = port_setup;
1921                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1922         } else {
1923                 kfree(info);
1924                 printk(KERN_WARNING "ipmi_si: Unknown ACPI I/O Address type\n");
1925                 return -EIO;
1926         }
1927         info->io.addr_data = spmi->addr.address;
1928
1929         try_smi_init(info);
1930
1931         return 0;
1932 }
1933
1934 static __devinit void acpi_find_bmc(void)
1935 {
1936         acpi_status      status;
1937         struct SPMITable *spmi;
1938         int              i;
1939
1940         if (acpi_disabled)
1941                 return;
1942
1943         if (acpi_failure)
1944                 return;
1945
1946         for (i = 0; ; i++) {
1947                 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1948                                         (struct acpi_table_header **)&spmi);
1949                 if (status != AE_OK)
1950                         return;
1951
1952                 try_init_acpi(spmi);
1953         }
1954 }
1955 #endif
1956
1957 #ifdef CONFIG_DMI
1958 struct dmi_ipmi_data
1959 {
1960         u8              type;
1961         u8              addr_space;
1962         unsigned long   base_addr;
1963         u8              irq;
1964         u8              offset;
1965         u8              slave_addr;
1966 };
1967
1968 static int __devinit decode_dmi(const struct dmi_header *dm,
1969                                 struct dmi_ipmi_data *dmi)
1970 {
1971         const u8        *data = (const u8 *)dm;
1972         unsigned long   base_addr;
1973         u8              reg_spacing;
1974         u8              len = dm->length;
1975
1976         dmi->type = data[4];
1977
1978         memcpy(&base_addr, data+8, sizeof(unsigned long));
1979         if (len >= 0x11) {
1980                 if (base_addr & 1) {
1981                         /* I/O */
1982                         base_addr &= 0xFFFE;
1983                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1984                 }
1985                 else {
1986                         /* Memory */
1987                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1988                 }
1989                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1990                    is odd. */
1991                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
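                /* Worked example with made-up values: a raw base of
                 * 0x0CA3 has bit 0 set, so it is I/O space at 0x0CA2;
                 * if bit 4 of byte 0x10 is also set, the real base is
                 * 0x0CA2 | 1 = 0x0CA3. */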
1992
1993                 dmi->irq = data[0x11];
1994
1995                 /* The top two bits of byte 0x10 hold the register spacing. */
1996                 reg_spacing = (data[0x10] & 0xC0) >> 6;
1997                 switch (reg_spacing) {
1998                 case 0x00: /* Byte boundaries */
1999                         dmi->offset = 1;
2000                         break;
2001                 case 0x01: /* 32-bit boundaries */
2002                         dmi->offset = 4;
2003                         break;
2004                 case 0x02: /* 16-byte boundaries */
2005                         dmi->offset = 16;
2006                         break;
2007                 default:
2008                         /* Some other interface, just ignore it. */
2009                         return -EIO;
2010                 }
2011         } else {
2012                 /* Old DMI spec. */
2013                 /* Note that technically, the lower bit of the base
2014                  * address should be 1 if the address is I/O and 0 if
2015                  * the address is in memory.  So many systems get that
2016                  * wrong (and all that I have seen are I/O) so we just
2017                  * ignore that bit and assume I/O.  Systems that use
2018                  * memory should use the newer spec, anyway. */
2019                 dmi->base_addr = base_addr & 0xfffe;
2020                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2021                 dmi->offset = 1;
2022         }
2023
2024         dmi->slave_addr = data[6];
2025
2026         return 0;
2027 }
2028
2029 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2030 {
2031         struct smi_info *info;
2032
2033         info = kzalloc(sizeof(*info), GFP_KERNEL);
2034         if (!info) {
2035                 printk(KERN_ERR
2036                        "ipmi_si: Could not allocate SI data\n");
2037                 return;
2038         }
2039
2040         info->addr_source = "SMBIOS";
2041
2042         switch (ipmi_data->type) {
2043         case 0x01: /* KCS */
2044                 info->si_type = SI_KCS;
2045                 break;
2046         case 0x02: /* SMIC */
2047                 info->si_type = SI_SMIC;
2048                 break;
2049         case 0x03: /* BT */
2050                 info->si_type = SI_BT;
2051                 break;
2052         default:
2053                 kfree(info);
2054                 return;
2055         }
2056
2057         switch (ipmi_data->addr_space) {
2058         case IPMI_MEM_ADDR_SPACE:
2059                 info->io_setup = mem_setup;
2060                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2061                 break;
2062
2063         case IPMI_IO_ADDR_SPACE:
2064                 info->io_setup = port_setup;
2065                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2066                 break;
2067
2068         default:
2069                 kfree(info);
2070                 printk(KERN_WARNING
2071                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2072                        ipmi_data->addr_space);
2073                 return;
2074         }
2075         info->io.addr_data = ipmi_data->base_addr;
2076
2077         info->io.regspacing = ipmi_data->offset;
2078         if (!info->io.regspacing)
2079                 info->io.regspacing = DEFAULT_REGSPACING;
2080         info->io.regsize = DEFAULT_REGSIZE;
2081         info->io.regshift = 0;
2082
2083         info->slave_addr = ipmi_data->slave_addr;
2084
2085         info->irq = ipmi_data->irq;
2086         if (info->irq)
2087                 info->irq_setup = std_irq_setup;
2088
2089         try_smi_init(info);
2090 }
2091
2092 static void __devinit dmi_find_bmc(void)
2093 {
2094         const struct dmi_device *dev = NULL;
2095         struct dmi_ipmi_data data;
2096         int                  rv;
2097
2098         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2099                 memset(&data, 0, sizeof(data));
2100                 rv = decode_dmi((const struct dmi_header *) dev->device_data,
2101                                 &data);
2102                 if (!rv)
2103                         try_init_dmi(&data);
2104         }
2105 }
2106 #endif /* CONFIG_DMI */
2107
2108 #ifdef CONFIG_PCI
2109
2110 #define PCI_ERMC_CLASSCODE              0x0C0700
2111 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2112 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2113 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2114 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2115 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2116
2117 #define PCI_HP_VENDOR_ID    0x103C
2118 #define PCI_MMC_DEVICE_ID   0x121A
2119 #define PCI_MMC_ADDR_CW     0x10
2120
2121 static void ipmi_pci_cleanup(struct smi_info *info)
2122 {
2123         struct pci_dev *pdev = info->addr_source_data;
2124
2125         pci_disable_device(pdev);
2126 }
2127
2128 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2129                                     const struct pci_device_id *ent)
2130 {
2131         int rv;
2132         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2133         struct smi_info *info;
2134         int first_reg_offset = 0;
2135
2136         info = kzalloc(sizeof(*info), GFP_KERNEL);
2137         if (!info)
2138                 return -ENOMEM;
2139
2140         info->addr_source = "PCI";
2141
2142         switch (class_type) {
2143         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2144                 info->si_type = SI_SMIC;
2145                 break;
2146
2147         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2148                 info->si_type = SI_KCS;
2149                 break;
2150
2151         case PCI_ERMC_CLASSCODE_TYPE_BT:
2152                 info->si_type = SI_BT;
2153                 break;
2154
2155         default:
2156                 kfree(info);
2157                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2158                        pci_name(pdev), class_type);
2159                 return -ENOMEM;
2160         }
2161
2162         rv = pci_enable_device(pdev);
2163         if (rv) {
2164                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2165                        pci_name(pdev));
2166                 kfree(info);
2167                 return rv;
2168         }
2169
2170         info->addr_source_cleanup = ipmi_pci_cleanup;
2171         info->addr_source_data = pdev;
2172
2173         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2174                 first_reg_offset = 1;
2175
2176         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2177                 info->io_setup = port_setup;
2178                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2179         } else {
2180                 info->io_setup = mem_setup;
2181                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2182         }
2183         info->io.addr_data = pci_resource_start(pdev, 0);
2184
2185         info->io.regspacing = DEFAULT_REGSPACING;
2186         info->io.regsize = DEFAULT_REGSIZE;
2187         info->io.regshift = 0;
2188
2189         info->irq = pdev->irq;
2190         if (info->irq)
2191                 info->irq_setup = std_irq_setup;
2192
2193         info->dev = &pdev->dev;
2194         pci_set_drvdata(pdev, info);
2195
2196         return try_smi_init(info);
2197 }
2198
2199 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2200 {
2201         struct smi_info *info = pci_get_drvdata(pdev);
2202         cleanup_one_si(info);
2203 }
2204
2205 #ifdef CONFIG_PM
2206 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2207 {
2208         return 0;
2209 }
2210
2211 static int ipmi_pci_resume(struct pci_dev *pdev)
2212 {
2213         return 0;
2214 }
2215 #endif
2216
2217 static struct pci_device_id ipmi_pci_devices[] = {
2218         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2219         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2220         { 0, }
2221 };
2222 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2223
2224 static struct pci_driver ipmi_pci_driver = {
2225         .name =         DEVICE_NAME,
2226         .id_table =     ipmi_pci_devices,
2227         .probe =        ipmi_pci_probe,
2228         .remove =       __devexit_p(ipmi_pci_remove),
2229 #ifdef CONFIG_PM
2230         .suspend =      ipmi_pci_suspend,
2231         .resume =       ipmi_pci_resume,
2232 #endif
2233 };
2234 #endif /* CONFIG_PCI */
2235
2236
2237 #ifdef CONFIG_PPC_OF
2238 static int __devinit ipmi_of_probe(struct of_device *dev,
2239                          const struct of_device_id *match)
2240 {
2241         struct smi_info *info;
2242         struct resource resource;
2243         const int *regsize, *regspacing, *regshift;
2244         struct device_node *np = dev->node;
2245         int ret;
2246         int proplen;
2247
2248         dev_info(&dev->dev, PFX "probing via device tree\n");
2249
2250         ret = of_address_to_resource(np, 0, &resource);
2251         if (ret) {
2252                 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2253                 return ret;
2254         }
2255
2256         regsize = of_get_property(np, "reg-size", &proplen);
2257         if (regsize && proplen != 4) {
2258                 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2259                 return -EINVAL;
2260         }
2261
2262         regspacing = of_get_property(np, "reg-spacing", &proplen);
2263         if (regspacing && proplen != 4) {
2264                 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2265                 return -EINVAL;
2266         }
2267
2268         regshift = of_get_property(np, "reg-shift", &proplen);
2269         if (regshift && proplen != 4) {
2270                 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2271                 return -EINVAL;
2272         }
2273
2274         info = kzalloc(sizeof(*info), GFP_KERNEL);
2275
2276         if (!info) {
2277                 dev_err(&dev->dev,
2278                         PFX "could not allocate memory for OF probe\n");
2279                 return -ENOMEM;
2280         }
2281
2282         info->si_type           = (enum si_type) match->data;
2283         info->addr_source       = "device-tree";
2284         info->io_setup          = mem_setup;
2285         info->irq_setup         = std_irq_setup;
2286
2287         info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2288         info->io.addr_data      = resource.start;
2289
2290         info->io.regsize        = regsize ? *regsize : DEFAULT_REGSIZE;
2291         info->io.regspacing     = regspacing ? *regspacing : DEFAULT_REGSPACING;
2292         info->io.regshift       = regshift ? *regshift : 0;
2293
2294         info->irq               = irq_of_parse_and_map(dev->node, 0);
2295         info->dev               = &dev->dev;
2296
2297         dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
2298                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2299                 info->irq);
2300
2301         dev->dev.driver_data = (void*) info;
2302
2303         return try_smi_init(info);
2304 }
2305
2306 static int __devexit ipmi_of_remove(struct of_device *dev)
2307 {
2308         cleanup_one_si(dev->dev.driver_data);
2309         return 0;
2310 }
2311
2312 static struct of_device_id ipmi_match[] =
2313 {
2314         { .type = "ipmi", .compatible = "ipmi-kcs",  .data = (void *)(unsigned long) SI_KCS },
2315         { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
2316         { .type = "ipmi", .compatible = "ipmi-bt",   .data = (void *)(unsigned long) SI_BT },
2317         {},
2318 };
2319
2320 static struct of_platform_driver ipmi_of_platform_driver =
2321 {
2322         .name           = "ipmi",
2323         .match_table    = ipmi_match,
2324         .probe          = ipmi_of_probe,
2325         .remove         = __devexit_p(ipmi_of_remove),
2326 };
2327 #endif /* CONFIG_PPC_OF */
2328
2329
2330 static int try_get_dev_id(struct smi_info *smi_info)
2331 {
2332         unsigned char         msg[2];
2333         unsigned char         *resp;
2334         unsigned long         resp_len;
2335         enum si_sm_result     smi_result;
2336         int                   rv = 0;
2337
2338         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2339         if (!resp)
2340                 return -ENOMEM;
2341
2342         /* Do a Get Device ID command, since it comes back with some
2343            useful info. */
2344         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2345         msg[1] = IPMI_GET_DEVICE_ID_CMD;
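        /* With the usual ipmi_msgdefs.h values (NetFn App = 0x06,
         * Get Device ID = 0x01 -- stated here only as an illustration),
         * the request is just the two bytes 0x18 0x01. */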
2346         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2347
2348         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2349         for (;;)
2350         {
2351                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2352                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2353                         schedule_timeout_uninterruptible(1);
2354                         smi_result = smi_info->handlers->event(
2355                                 smi_info->si_sm, 100);
2356                 }
2357                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2358                 {
2359                         smi_result = smi_info->handlers->event(
2360                                 smi_info->si_sm, 0);
2361                 }
2362                 else
2363                         break;
2364         }
2365         if (smi_result == SI_SM_HOSED) {
2366                 /* We couldn't get the state machine to run, so whatever's at
2367                    the port is probably not an IPMI SMI interface. */
2368                 rv = -ENODEV;
2369                 goto out;
2370         }
2371
2372         /* Otherwise, we got some data. */
2373         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2374                                                   resp, IPMI_MAX_MSG_LENGTH);
2375         if (resp_len < 14) {
2376                 /* That's odd, it should be longer. */
2377                 rv = -EINVAL;
2378                 goto out;
2379         }
2380
2381         if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2382                 /* That's odd, it shouldn't be able to fail. */
2383                 rv = -EINVAL;
2384                 goto out;
2385         }
2386
2387         /* Record info from the get device id, in case we need it. */
2388         ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
2389
2390  out:
2391         kfree(resp);
2392         return rv;
2393 }
2394
2395 static int type_file_read_proc(char *page, char **start, off_t off,
2396                                int count, int *eof, void *data)
2397 {
2398         struct smi_info *smi = data;
2399
2400         return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2401 }
2402
2403 static int stat_file_read_proc(char *page, char **start, off_t off,
2404                                int count, int *eof, void *data)
2405 {
2406         char            *out = (char *) page;
2407         struct smi_info *smi = data;
2408
2409         out += sprintf(out, "interrupts_enabled:    %d\n",
2410                        smi->irq && !smi->interrupt_disabled);
2411         out += sprintf(out, "short_timeouts:        %ld\n",
2412                        smi->short_timeouts);
2413         out += sprintf(out, "long_timeouts:         %ld\n",
2414                        smi->long_timeouts);
2415         out += sprintf(out, "timeout_restarts:      %ld\n",
2416                        smi->timeout_restarts);
2417         out += sprintf(out, "idles:                 %ld\n",
2418                        smi->idles);
2419         out += sprintf(out, "interrupts:            %ld\n",
2420                        smi->interrupts);
2421         out += sprintf(out, "attentions:            %ld\n",
2422                        smi->attentions);
2423         out += sprintf(out, "flag_fetches:          %ld\n",
2424                        smi->flag_fetches);
2425         out += sprintf(out, "hosed_count:           %ld\n",
2426                        smi->hosed_count);
2427         out += sprintf(out, "complete_transactions: %ld\n",
2428                        smi->complete_transactions);
2429         out += sprintf(out, "events:                %ld\n",
2430                        smi->events);
2431         out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
2432                        smi->watchdog_pretimeouts);
2433         out += sprintf(out, "incoming_messages:     %ld\n",
2434                        smi->incoming_messages);
2435
2436         return out - page;
2437 }
2438
2439 static int param_read_proc(char *page, char **start, off_t off,
2440                            int count, int *eof, void *data)
2441 {
2442         struct smi_info *smi = data;
2443
2444         return sprintf(page,
2445                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2446                        si_to_str[smi->si_type],
2447                        addr_space_to_str[smi->io.addr_type],
2448                        smi->io.addr_data,
2449                        smi->io.regspacing,
2450                        smi->io.regsize,
2451                        smi->io.regshift,
2452                        smi->irq,
2453                        smi->slave_addr);
2454 }
2455
2456 /*
2457  * oem_data_avail_to_receive_msg_avail
2458  * @info - smi_info structure with msg_flags set
2459  *
2460  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2461  * Returns 1 indicating need to re-run handle_flags().
2462  */
2463 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2464 {
2465         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2466                                 RECEIVE_MSG_AVAIL);
2467         return 1;
2468 }
2469
2470 /*
2471  * setup_dell_poweredge_oem_data_handler
2472  * @info - smi_info.device_id must be populated
2473  *
2474  * Systems that match, but have firmware version < 1.40 may assert
2475  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2476  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2477  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2478  * as RECEIVE_MSG_AVAIL instead.
2479  *
2480  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2481  * asserts the OEM[012] bits, and if it did, the driver would have to
2482  * change to handle that properly, we don't actually check for the
2483  * firmware version.
2484  * Device ID = 0x20                BMC on PowerEdge 8G servers
2485  * Device Revision = 0x80
2486  * Firmware Revision1 = 0x01       BMC version 1.40
2487  * Firmware Revision2 = 0x40       BCD encoded
2488  * IPMI Version = 0x51             IPMI 1.5
2489  * Manufacturer ID = A2 02 00      Dell IANA
2490  *
2491  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2492  * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
2493  *
2494  */
2495 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2496 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2497 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2498 #define DELL_IANA_MFR_ID 0x0002a2
2499 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2500 {
2501         struct ipmi_device_id *id = &smi_info->device_id;
2502         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2503                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2504                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2505                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2506                         smi_info->oem_data_avail_handler =
2507                                 oem_data_avail_to_receive_msg_avail;
2508                 }
2509                 else if (ipmi_version_major(id) < 1 ||
2510                          (ipmi_version_major(id) == 1 &&
2511                           ipmi_version_minor(id) < 5)) {
2512                         smi_info->oem_data_avail_handler =
2513                                 oem_data_avail_to_receive_msg_avail;
2514                 }
2515         }
2516 }
2517
2518 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2519 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2520 {
2521         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2522
2523         /* Make it a response */
2524         msg->rsp[0] = msg->data[0] | 4;
2525         msg->rsp[1] = msg->data[1];
2526         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2527         msg->rsp_size = 3;
2528         smi_info->curr_msg = NULL;
2529         deliver_recv_msg(smi_info, msg);
2530 }
2531
2532 /*
2533  * dell_poweredge_bt_xaction_handler
2534  * @info - smi_info.device_id must be populated
2535  *
2536  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2537  * not respond to a Get SDR command if the length of the data
2538  * requested is exactly 0x3A, which leads to command timeouts and no
2539  * data returned.  This intercepts such commands, and causes userspace
2540  * callers to try again with a different-sized buffer, which succeeds.
2541  */
2542
2543 #define STORAGE_NETFN 0x0A
2544 #define STORAGE_CMD_GET_SDR 0x23
2545 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2546                                              unsigned long unused,
2547                                              void *in)
2548 {
2549         struct smi_info *smi_info = in;
2550         unsigned char *data = smi_info->curr_msg->data;
2551         unsigned int size   = smi_info->curr_msg->data_size;
2552         if (size >= 8 &&
2553             (data[0]>>2) == STORAGE_NETFN &&
2554             data[1] == STORAGE_CMD_GET_SDR &&
2555             data[7] == 0x3A) {
2556                 return_hosed_msg_badsize(smi_info);
2557                 return NOTIFY_STOP;
2558         }
2559         return NOTIFY_DONE;
2560 }
2561
2562 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2563         .notifier_call  = dell_poweredge_bt_xaction_handler,
2564 };
2565
2566 /*
2567  * setup_dell_poweredge_bt_xaction_handler
2568  * @info - smi_info.device_id must be filled in already
2569  *
2570  * Registers the Dell PowerEdge BT transaction notifier when the
2571  * device ID and interface type show that it is needed.
2572  */
2573 static void
2574 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2575 {
2576         struct ipmi_device_id *id = &smi_info->device_id;
2577         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2578             smi_info->si_type == SI_BT)
2579                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2580 }
2581
2582 /*
2583  * setup_oem_data_handler
2584  * @info - smi_info.device_id must be filled in already
2585  *
2586  * Fills in smi_info.oem_data_avail_handler
2587  * when we know what function to use there.
2588  */
2589
2590 static void setup_oem_data_handler(struct smi_info *smi_info)
2591 {
2592         setup_dell_poweredge_oem_data_handler(smi_info);
2593 }
2594
2595 static void setup_xaction_handlers(struct smi_info *smi_info)
2596 {
2597         setup_dell_poweredge_bt_xaction_handler(smi_info);
2598 }
2599
2600 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2601 {
2602         if (smi_info->intf) {
2603                 /* The timer and thread are only running if the
2604                    interface has been started up and registered. */
2605                 if (smi_info->thread != NULL)
2606                         kthread_stop(smi_info->thread);
2607                 del_timer_sync(&smi_info->si_timer);
2608         }
2609 }
2610
2611 static __devinitdata struct ipmi_default_vals
2612 {
2613         int type;
2614         int port;
2615 } ipmi_defaults[] =
2616 {
2617         { .type = SI_KCS, .port = 0xca2 },
2618         { .type = SI_SMIC, .port = 0xca9 },
2619         { .type = SI_BT, .port = 0xe4 },
2620         { .port = 0 }
2621 };
2622
2623 static __devinit void default_find_bmc(void)
2624 {
2625         struct smi_info *info;
2626         int             i;
2627
2628         for (i = 0; ; i++) {
2629                 if (!ipmi_defaults[i].port)
2630                         break;
2631
2632 #ifdef CONFIG_PPC_MERGE
2633                 if (check_legacy_ioport(ipmi_defaults[i].port))
2634                         continue;
2635 #endif
2636
2637                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2638                 if (!info)
2639                         return;
2640
2641                 info->addr_source = NULL;
2642
2643                 info->si_type = ipmi_defaults[i].type;
2644                 info->io_setup = port_setup;
2645                 info->io.addr_data = ipmi_defaults[i].port;
2646                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2647
2648                 info->io.addr = NULL;
2649                 info->io.regspacing = DEFAULT_REGSPACING;
2650                 info->io.regsize = DEFAULT_REGSIZE;
2651                 info->io.regshift = 0;
2652
2653                 if (try_smi_init(info) == 0) {
2654                         /* Found one... */
2655                         printk(KERN_INFO "ipmi_si: Found default %s state"
2656                                " machine at %s address 0x%lx\n",
2657                                si_to_str[info->si_type],
2658                                addr_space_to_str[info->io.addr_type],
2659                                info->io.addr_data);
2660                         return;
2661                 }
2662         }
2663 }
2664
2665 static int is_new_interface(struct smi_info *info)
2666 {
2667         struct smi_info *e;
2668
2669         list_for_each_entry(e, &smi_infos, link) {
2670                 if (e->io.addr_type != info->io.addr_type)
2671                         continue;
2672                 if (e->io.addr_data == info->io.addr_data)
2673                         return 0;
2674         }
2675
2676         return 1;
2677 }
2678
2679 static int try_smi_init(struct smi_info *new_smi)
2680 {
2681         int rv;
2682
2683         if (new_smi->addr_source) {
2684                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2685                        " machine at %s address 0x%lx, slave address 0x%x,"
2686                        " irq %d\n",
2687                        new_smi->addr_source,
2688                        si_to_str[new_smi->si_type],
2689                        addr_space_to_str[new_smi->io.addr_type],
2690                        new_smi->io.addr_data,
2691                        new_smi->slave_addr, new_smi->irq);
2692         }
2693
2694         mutex_lock(&smi_infos_lock);
2695         if (!is_new_interface(new_smi)) {
2696                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2697                 rv = -EBUSY;
2698                 goto out_err;
2699         }
2700
2701         /* So we know not to free it unless we have allocated one. */
2702         new_smi->intf = NULL;
2703         new_smi->si_sm = NULL;
2704         new_smi->handlers = NULL;
2705
2706         switch (new_smi->si_type) {
2707         case SI_KCS:
2708                 new_smi->handlers = &kcs_smi_handlers;
2709                 break;
2710
2711         case SI_SMIC:
2712                 new_smi->handlers = &smic_smi_handlers;
2713                 break;
2714
2715         case SI_BT:
2716                 new_smi->handlers = &bt_smi_handlers;
2717                 break;
2718
2719         default:
2720                 /* No support for anything else yet. */
2721                 rv = -EIO;
2722                 goto out_err;
2723         }
2724
2725         /* Allocate the state machine's data and initialize it. */
2726         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2727         if (!new_smi->si_sm) {
2728                 printk(" Could not allocate state machine memory\n");
2729                 rv = -ENOMEM;
2730                 goto out_err;
2731         }
2732         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2733                                                         &new_smi->io);
2734
2735         /* Now that we know the I/O size, we can set up the I/O. */
2736         rv = new_smi->io_setup(new_smi);
2737         if (rv) {
2738                 printk(" Could not set up I/O space\n");
2739                 goto out_err;
2740         }
2741
2742         spin_lock_init(&(new_smi->si_lock));
2743         spin_lock_init(&(new_smi->msg_lock));
2744         spin_lock_init(&(new_smi->count_lock));
2745
2746         /* Do low-level detection first. */
2747         if (new_smi->handlers->detect(new_smi->si_sm)) {
2748                 if (new_smi->addr_source)
2749                         printk(KERN_INFO "ipmi_si: Interface detection"
2750                                " failed\n");
2751                 rv = -ENODEV;
2752                 goto out_err;
2753         }
2754
2755         /* Attempt a get device id command.  If it fails, we probably
2756            don't have a BMC here. */
2757         rv = try_get_dev_id(new_smi);
2758         if (rv) {
2759                 if (new_smi->addr_source)
2760                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2761                                " at this location\n");
2762                 goto out_err;
2763         }
2764
2765         setup_oem_data_handler(new_smi);
2766         setup_xaction_handlers(new_smi);
2767
2768         /* Try to claim any interrupts. */
2769         if (new_smi->irq_setup)
2770                 new_smi->irq_setup(new_smi);
2771
2772         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2773         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2774         new_smi->curr_msg = NULL;
2775         atomic_set(&new_smi->req_events, 0);
2776         new_smi->run_to_completion = 0;
2777
2778         new_smi->interrupt_disabled = 0;
2779         atomic_set(&new_smi->stop_operation, 0);
2780         new_smi->intf_num = smi_num;
2781         smi_num++;
2782
2783         /* Start clearing the flags before we enable interrupts or the
2784            timer to avoid racing with the timer. */
2785         start_clear_flags(new_smi);
2786         /* IRQ is defined to be set when non-zero. */
2787         if (new_smi->irq)
2788                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2789
2790         if (!new_smi->dev) {
2791                 /* If we don't already have a device from something
2792                  * else (like PCI), then register a new one. */
2793                 new_smi->pdev = platform_device_alloc("ipmi_si",
2794                                                       new_smi->intf_num);
2795                 if (!new_smi->pdev) {
2796                         rv = -ENOMEM;
2797                         printk(KERN_ERR "ipmi_si_intf:"
2798                                " Unable to allocate platform device\n");
2799                         goto out_err;
2800                 }
2801                 new_smi->dev = &new_smi->pdev->dev;
2802                 new_smi->dev->driver = &ipmi_driver;
2803
2804                 rv = platform_device_add(new_smi->pdev);
2805                 if (rv) {
2806                         printk(KERN_ERR
2807                                "ipmi_si_intf:"
2808                                " Unable to register system interface device:"
2809                                " %d\n",
2810                                rv);
2811                         goto out_err;
2812                 }
2813                 new_smi->dev_registered = 1;
2814         }
2815
2816         rv = ipmi_register_smi(&handlers,
2817                                new_smi,
2818                                &new_smi->device_id,
2819                                new_smi->dev,
2820                                "bmc",
2821                                new_smi->slave_addr);
2822         if (rv) {
2823                 printk(KERN_ERR
2824                        "ipmi_si: Unable to register device: error %d\n",
2825                        rv);
2826                 goto out_err_stop_timer;
2827         }
2828
2829         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2830                                      type_file_read_proc, NULL,
2831                                      new_smi, THIS_MODULE);
2832         if (rv) {
2833                 printk(KERN_ERR
2834                        "ipmi_si: Unable to create proc entry: %d\n",
2835                        rv);
2836                 goto out_err_stop_timer;
2837         }
2838
2839         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2840                                      stat_file_read_proc, NULL,
2841                                      new_smi, THIS_MODULE);
2842         if (rv) {
2843                 printk(KERN_ERR
2844                        "ipmi_si: Unable to create proc entry: %d\n",
2845                        rv);
2846                 goto out_err_stop_timer;
2847         }
2848
2849         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2850                                      param_read_proc, NULL,
2851                                      new_smi, THIS_MODULE);
2852         if (rv) {
2853                 printk(KERN_ERR
2854                        "ipmi_si: Unable to create proc entry: %d\n",
2855                        rv);
2856                 goto out_err_stop_timer;
2857         }
2858
2859         list_add_tail(&new_smi->link, &smi_infos);
2860
2861         mutex_unlock(&smi_infos_lock);
2862
2863         printk(KERN_INFO "IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
2864
2865         return 0;
2866
2867  out_err_stop_timer:
2868         atomic_inc(&new_smi->stop_operation);
2869         wait_for_timer_and_thread(new_smi);
2870
2871  out_err:
2872         if (new_smi->intf)
2873                 ipmi_unregister_smi(new_smi->intf);
2874
2875         if (new_smi->irq_cleanup)
2876                 new_smi->irq_cleanup(new_smi);
2877
2878         /* Wait until we know that we are out of any interrupt
2879            handlers that might have been running before we freed
2880            the interrupt. */
2881         synchronize_sched();
2882
2883         if (new_smi->si_sm) {
2884                 if (new_smi->handlers)
2885                         new_smi->handlers->cleanup(new_smi->si_sm);
2886                 kfree(new_smi->si_sm);
2887         }
2888         if (new_smi->addr_source_cleanup)
2889                 new_smi->addr_source_cleanup(new_smi);
2890         if (new_smi->io_cleanup)
2891                 new_smi->io_cleanup(new_smi);
2892
2893         if (new_smi->dev_registered)
2894                 platform_device_unregister(new_smi->pdev);
2895
2896         kfree(new_smi);
2897
2898         mutex_unlock(&smi_infos_lock);
2899
2900         return rv;
2901 }
2902
2903 static __devinit int init_ipmi_si(void)
2904 {
2905         int  i;
2906         char *str;
2907         int  rv;
2908
2909         if (initialized)
2910                 return 0;
2911         initialized = 1;
2912
2913         /* Register the device drivers. */
2914         rv = driver_register(&ipmi_driver);
2915         if (rv) {
2916                 printk(KERN_ERR
2917                        "init_ipmi_si: Unable to register driver: %d\n",
2918                        rv);
2919                 return rv;
2920         }
2921
2922
2923         /* Parse out the si_type string into its components. */
2924         str = si_type_str;
2925         if (*str != '\0') {
2926                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2927                         si_type[i] = str;
2928                         str = strchr(str, ',');
2929                         if (str) {
2930                                 *str = '\0';
2931                                 str++;
2932                         } else {
2933                                 break;
2934                         }
2935                 }
2936         }
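        /*
         * A sketch of what the loop above produces, assuming si_type_str
         * is bound to the "type" module parameter elsewhere in this file
         * (e.g. "modprobe ipmi_si type=kcs,smic"): the string is split in
         * place at each comma, leaving si_type[0] = "kcs" and
         * si_type[1] = "smic"; any later si_type[] slots are left
         * untouched, up to SI_MAX_PARMS interfaces.
         */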
2937
2938         printk(KERN_INFO "IPMI System Interface driver.\n");
2939
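        /*
         * Discovery happens in stages: interfaces the user hard-coded via
         * module parameters are registered first, then anything described
         * by DMI/SMBIOS or ACPI tables, then PCI and OF devices as those
         * bus drivers bind, and finally (further down) the IPMI-specified
         * default ports if nothing was found and trydefaults is set.
         */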
2940         hardcode_find_bmc();
2941
2942 #ifdef CONFIG_DMI
2943         dmi_find_bmc();
2944 #endif
2945
2946 #ifdef CONFIG_ACPI
2947         acpi_find_bmc();
2948 #endif
2949
2950 #ifdef CONFIG_PCI
2951         rv = pci_register_driver(&ipmi_pci_driver);
2952         if (rv) {
2953                 printk(KERN_ERR
2954                        "init_ipmi_si: Unable to register PCI driver: %d\n",
2955                        rv);
2956         }
2957 #endif
2958
2959 #ifdef CONFIG_PPC_OF
2960         of_register_platform_driver(&ipmi_of_platform_driver);
2961 #endif
2962
2963         if (si_trydefaults) {
2964                 mutex_lock(&smi_infos_lock);
2965                 if (list_empty(&smi_infos)) {
2966                         /* No BMC was found, try defaults. */
2967                         mutex_unlock(&smi_infos_lock);
2968                         default_find_bmc();
2969                 } else {
2970                         mutex_unlock(&smi_infos_lock);
2971                 }
2972         }
2973
2974         mutex_lock(&smi_infos_lock);
2975         if (unload_when_empty && list_empty(&smi_infos)) {
2976                 mutex_unlock(&smi_infos_lock);
2977 #ifdef CONFIG_PCI
2978                 pci_unregister_driver(&ipmi_pci_driver);
2979 #endif
2980
2981 #ifdef CONFIG_PPC_OF
2982                 of_unregister_platform_driver(&ipmi_of_platform_driver);
2983 #endif
2984                 driver_unregister(&ipmi_driver);
2985                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2986                 return -ENODEV;
2987         } else {
2988                 mutex_unlock(&smi_infos_lock);
2989                 return 0;
2990         }
2991 }
2992 module_init(init_ipmi_si);
2993
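/*
 * Tear down a single interface: stop the timer and kthread, drain any
 * message still in flight, shut off and free the interrupt, unregister
 * from the IPMI core, and release the state machine, I/O resources and
 * platform device.  Callers are expected to hold smi_infos_lock, since
 * the entry is unlinked from smi_infos here.
 */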
2994 static void cleanup_one_si(struct smi_info *to_clean)
2995 {
2996         int           rv;
2997         unsigned long flags;
2998
2999         if (!to_clean)
3000                 return;
3001
3002         list_del(&to_clean->link);
3003
3004         /* Tell the driver that we are shutting down. */
3005         atomic_inc(&to_clean->stop_operation);
3006
3007         /* Make sure the timer and thread are stopped and will not run
3008            again. */
3009         wait_for_timer_and_thread(to_clean);
3010
3011         /* Timeouts are stopped; now make sure the device's interrupts
3012            are off.  This takes a little care with the lock so there
3013            are no races with a message still in flight. */
3014         spin_lock_irqsave(&to_clean->si_lock, flags);
3015         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3016                 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3017                 poll(to_clean);
3018                 schedule_timeout_uninterruptible(1);
3019                 spin_lock_irqsave(&to_clean->si_lock, flags);
3020         }
3021         disable_si_irq(to_clean);
3022         spin_unlock_irqrestore(&to_clean->si_lock, flags);
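        /* Pick up anything that raced in before the interrupt was disabled. */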
3023         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3024                 poll(to_clean);
3025                 schedule_timeout_uninterruptible(1);
3026         }
3027
3028         /* Clean up interrupts and make sure that everything is done. */
3029         if (to_clean->irq_cleanup)
3030                 to_clean->irq_cleanup(to_clean);
3031         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3032                 poll(to_clean);
3033                 schedule_timeout_uninterruptible(1);
3034         }
3035
3036         rv = ipmi_unregister_smi(to_clean->intf);
3037         if (rv) {
3038                 printk(KERN_ERR
3039                        "ipmi_si: Unable to unregister device: errno=%d\n",
3040                        rv);
3041         }
3042
3043         to_clean->handlers->cleanup(to_clean->si_sm);
3044
3045         kfree(to_clean->si_sm);
3046
3047         if (to_clean->addr_source_cleanup)
3048                 to_clean->addr_source_cleanup(to_clean);
3049         if (to_clean->io_cleanup)
3050                 to_clean->io_cleanup(to_clean);
3051
3052         if (to_clean->dev_registered)
3053                 platform_device_unregister(to_clean->pdev);
3054
3055         kfree(to_clean);
3056 }
3057
3058 static __exit void cleanup_ipmi_si(void)
3059 {
3060         struct smi_info *e, *tmp_e;
3061
3062         if (!initialized)
3063                 return;
3064
3065 #ifdef CONFIG_PCI
3066         pci_unregister_driver(&ipmi_pci_driver);
3067 #endif
3068
3069 #ifdef CONFIG_PPC_OF
3070         of_unregister_platform_driver(&ipmi_of_platform_driver);
3071 #endif
3072
3073         mutex_lock(&smi_infos_lock);
3074         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3075                 cleanup_one_si(e);
3076         mutex_unlock(&smi_infos_lock);
3077
3078         driver_unregister(&ipmi_driver);
3079 }
3080 module_exit(cleanup_ipmi_si);
3081
3082 MODULE_LICENSE("GPL");
3083 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3084 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
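/*
 * Typical usage, as a sketch (the parameter names below follow this
 * driver's module_param declarations; treat the exact values as
 * examples and adjust to your hardware):
 *
 *   modprobe ipmi_si                       # autoprobe via DMI/ACPI/PCI/OF
 *   modprobe ipmi_si type=kcs ports=0xca2  # force a KCS interface at 0xca2
 *   modprobe ipmi_si trydefaults=0         # never fall back to default ports
 */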