From: Andi Kleen
Date: Tue, 26 Sep 2006 08:52:40 +0000 (+0200)
Subject: [PATCH] Fix idle notifiers
X-Git-Tag: v2.6.19-rc1~1077^2~26
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=a15da49debaf7f09460a886b0ecd08588410715e;p=linux-2.6

[PATCH] Fix idle notifiers

Previously exit_idle would be called more often than enter_idle.

Now instead of using complicated tests just keep track of it using
the per CPU variable as a flip flop.  I moved the idle state into the
PDA to make the access more efficient.

Original bug report and an initial patch from Stephane Eranian,
but redone by AK.

Cc: Stephane Eranian
Signed-off-by: Andi Kleen
---

diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 885c318f76..458006ae19 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -80,25 +80,25 @@ void idle_notifier_unregister(struct notifier_block *n)
 }
 EXPORT_SYMBOL(idle_notifier_unregister);
 
-enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
-static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
-
 void enter_idle(void)
 {
-	__get_cpu_var(idle_state) = CPU_IDLE;
+	write_pda(isidle, 1);
 	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
 
 static void __exit_idle(void)
 {
-	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
+	if (read_pda(isidle) == 0)
+		return;
+	write_pda(isidle, 0);
 	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
 
 /* Called from interrupts to signify idle end */
 void exit_idle(void)
 {
-	if (current->pid | read_pda(irqcount))
+	/* idle loop has pid 0 */
+	if (current->pid)
 		return;
 	__exit_idle();
 }
@@ -220,6 +220,9 @@ void cpu_idle (void)
 			play_dead();
 			enter_idle();
 			idle();
+			/* In many cases the interrupt that ended idle
+			   has already called exit_idle. But some idle
+			   loops can be woken up without interrupt. */
 			__exit_idle();
 		}
 
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index 531f48a6c3..14996d962b 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -25,7 +25,8 @@ struct x8664_pda {
 	int nodenumber;		/* number of current node */
 	unsigned int __softirq_pending;
 	unsigned int __nmi_count;	/* number of NMI on this CPUs */
-	int mmu_state;
+	short mmu_state;
+	short isidle;
 	struct mm_struct *active_mm;
 	unsigned apic_timer_irqs;
 } ____cacheline_aligned_in_smp;
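
[Editor's note, not part of the patch] For context, a minimal sketch of a consumer of the
idle notifier chain that this patch fixes, assuming the idle_notifier_register()/IDLE_START/
IDLE_END interface declared in asm-x86_64/idle.h in the same tree; the my_idle_* names are
hypothetical and only for illustration. With the flip-flop above, each IDLE_START on a CPU
is followed by at most one IDLE_END before the next IDLE_START.

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <asm/idle.h>	/* idle_notifier_register(), IDLE_START, IDLE_END */

	/* Hypothetical notifier callback invoked from enter_idle()/__exit_idle() above. */
	static int my_idle_event(struct notifier_block *nb, unsigned long action,
				 void *data)
	{
		if (action == IDLE_START) {
			/* this CPU is entering its idle loop */
		} else if (action == IDLE_END) {
			/* this CPU left idle (interrupt or other wakeup) */
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_idle_nb = {
		.notifier_call = my_idle_event,
	};

	static int __init my_idle_init(void)
	{
		idle_notifier_register(&my_idle_nb);
		return 0;
	}

	static void __exit my_idle_exit(void)
	{
		idle_notifier_unregister(&my_idle_nb);
	}

	module_init(my_idle_init);
	module_exit(my_idle_exit);

Such a consumer is exactly why the pairing matters: before this fix a callback could see
IDLE_END without a matching IDLE_START, since exit_idle fired more often than enter_idle.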