/*
 *	linux/arch/i386/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code (e.g. i8259.c and
 * io_apic.c).
 */
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
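
/*
 * Per-CPU interrupt statistics (pending-softirq bits, NMI and APIC
 * timer counts); the hardirq/softirq accounting macros reach it via
 * the per-CPU machinery, hence the cacheline alignment and export.
 */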
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);
#ifndef CONFIG_X86_LOCAL_APIC
/*
 * 'what should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk("unexpected IRQ trap at vector %02x\n", irq);
}
#endif
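/*
 * (With CONFIG_X86_LOCAL_APIC an APIC-aware ack_bad_irq() is provided
 * elsewhere; presumably it must also ACK the local APIC so a spurious
 * vector cannot wedge it.)
 */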
#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack);
 * laid out like a normal task stack: thread_info at the bottom of
 * the THREAD_SIZE block, the stack growing down toward it.
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
fastcall unsigned int do_IRQ(struct pt_regs *regs)
{
	/* high bit used in ret_from_ code: the entry stubs push the
	   ones' complement of the vector, so complementing orig_eax
	   recovers the IRQ number */
	int irq = ~regs->orig_eax;
#ifdef CONFIG_4KSTACKS
	union irq_ctx *curctx, *irqctx;
	u32 *isp;
#endif

	irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
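	/*
	 * Kernel stacks are THREAD_SIZE-aligned, so masking %esp with
	 * (THREAD_SIZE - 1) yields the offset into the current stack;
	 * a small offset means we are about to run into the
	 * thread_info at the bottom.
	 */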
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		long esp;

		__asm__ __volatile__("andl %%esp,%0" :
					"=r" (esp) : "0" (THREAD_SIZE - 1));
		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
			printk("do_IRQ: stack overflow: %ld\n",
				esp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
#ifdef CONFIG_4KSTACKS

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];
	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (curctx != irqctx) {
		int arg1, arg2, ebx;

		/* build the stack frame on the IRQ stack */
		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_esp = current_stack_pointer;
		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
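
		/*
		 * %ebx carries the new stack pointer in: xchgl installs
		 * the IRQ stack and saves the task stack in %ebx, the
		 * handler runs on the IRQ stack, and movl switches back
		 * afterwards. %eax/%edx pass irq and regs, matching the
		 * regparm calling convention used for __do_IRQ.
		 */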
107 " xchgl %%ebx,%%esp \n"
109 " movl %%ebx,%%esp \n"
110 : "=a" (arg1), "=d" (arg2), "=b" (ebx)
111 : "0" (irq), "1" (regs), "2" (isp)
112 : "memory", "cc", "ecx"
#ifdef CONFIG_4KSTACKS

/*
 * These should really be __section__(".bss.page_aligned") as well, but
 * gcc's 3.0 and earlier don't handle that correctly.
 */
static char softirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__aligned__(THREAD_SIZE)));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__aligned__(THREAD_SIZE)));
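
/*
 * THREAD_SIZE alignment matters: current_thread_info() locates the
 * thread_info by masking %esp, so every IRQ stack must occupy its
 * own naturally aligned THREAD_SIZE block.
 */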
/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;
	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= SOFTIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}
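
/*
 * Presumably called on CPU teardown (CPU hotplug); clearing
 * hardirq_ctx[] is sufficient, since irq_ctx_init() uses it as the
 * "already initialized" marker when the CPU comes back up.
 */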
void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}
extern asmlinkage void __do_softirq(void);

asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;
	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
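
		/*
		 * Same stack-switch trick as in do_IRQ(), but
		 * __do_softirq() takes no arguments, so only the new
		 * stack pointer travels in %ebx.
		 */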
196 " xchgl %%ebx,%%esp \n"
197 " call __do_softirq \n"
198 " movl %%ebx,%%esp \n"
201 : "memory", "cc", "edx", "ecx", "eax"
205 local_irq_restore(flags);
208 EXPORT_SYMBOL(do_softirq);
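
/*
 * (Without CONFIG_4KSTACKS the generic do_softirq() in kernel/softirq.c
 * is used instead; this override is the one __ARCH_HAS_DO_SOFTIRQ
 * selects.)
 */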
/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
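	/*
	 * The seq_file iterator hands us one index per call: index 0
	 * additionally prints the CPU header row, every i < NR_IRQS
	 * prints that IRQ's line, and i == NR_IRQS prints the
	 * NMI/LOC/ERR/MIS summary lines.
	 */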
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}
	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "LOC: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat, j).apic_timer_irqs);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
	}
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;
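
	/*
	 * Reroute every IRQ whose affinity mask no longer intersects
	 * 'map', the set of CPUs that will remain online (the calling
	 * CPU is going away). IRQ2 is the i8259 cascade and is never
	 * moved.
	 */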
	for (irq = 0; irq < NR_IRQS; irq++) {
		cpumask_t mask;

		if (irq == 2)
			continue;

		cpus_and(mask, irq_affinity[irq], map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].handler->set_affinity)
			irq_desc[irq].handler->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}
#if 0
	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
	   [note the nop - the interrupt-enable boundary on x86 is two
	   instructions from sti] - to flush out pending hardirqs and
	   IPIs. After this point nothing is supposed to reach this CPU." */
	__asm__ __volatile__("sti; nop; cli");
#else
	/* That doesn't seem sufficient.  Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
#endif
}
#endif