x86: idle wakeup event in the HLT loop
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 98956555450beff2c49d22eed79a63aecff87175..40fed477f3e5e488030a12735bcaa3b016dda28d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -1,6 +1,4 @@
 /*
- *  linux/arch/x86-64/kernel/process.c
- *
  *  Copyright (C) 1995  Linus Torvalds
  *
  *  Pentium III FXSR, SSE support
@@ -38,6 +36,7 @@
 #include <linux/notifier.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/tick.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -117,9 +116,16 @@ static void default_idle(void)
        smp_mb();
        local_irq_disable();
        if (!need_resched()) {
-               /* Enables interrupts one instruction before HLT.
-                  x86 special cases this so there is no race. */
-               safe_halt();
+               ktime_t t0, t1;
+               u64 t0n, t1n;
+
+               t0 = ktime_get();
+               t0n = ktime_to_ns(t0);
+               safe_halt();    /* enables interrupts racelessly */
+               local_irq_disable();
+               t1 = ktime_get();
+               t1n = ktime_to_ns(t1);
+               sched_clock_idle_wakeup_event(t1n - t0n);
        } else
                local_irq_enable();
        current_thread_info()->status |= TS_POLLING;
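
The hunk above brackets the HLT with ktime_get() so that sched_clock_idle_wakeup_event() can be told how long the CPU actually slept. As a rough userspace analogue of that measure-the-blocked-interval pattern (this is not the kernel code; every name below is illustrative), the same bookkeeping looks like this:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* convert a timespec to nanoseconds, the unit the kernel hook receives */
static uint64_t ts_to_ns(const struct timespec *ts)
{
	return (uint64_t)ts->tv_sec * 1000000000ull + (uint64_t)ts->tv_nsec;
}

int main(void)
{
	struct timespec t0, t1;
	struct timespec nap = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };

	clock_gettime(CLOCK_MONOTONIC, &t0);	/* stands in for ktime_get() before HLT */
	nanosleep(&nap, NULL);			/* stands in for the halted interval */
	clock_gettime(CLOCK_MONOTONIC, &t1);	/* stands in for ktime_get() after wakeup */

	printf("slept for %llu ns\n",
	       (unsigned long long)(ts_to_ns(&t1) - ts_to_ns(&t0)));
	return 0;
}

The local_irq_disable() right after safe_halt() keeps further interrupts from running before t1 is sampled, so the reported interval stays close to the time actually spent halted.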
@@ -136,6 +142,10 @@ static void poll_idle (void)
        cpu_relax();
 }
 
+static void do_nothing(void *unused)
+{
+}
+
 void cpu_idle_wait(void)
 {
        unsigned int cpu, this_cpu = get_cpu();
@@ -161,6 +171,13 @@ void cpu_idle_wait(void)
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
+               /*
+                * We waited 1 sec; if a CPU still did not call idle,
+                * it may be sitting in idle and not waking up because
+                * it has nothing to do.
+                * Give all the remaining CPUs a kick.
+                */
+               smp_call_function_mask(map, do_nothing, 0, 0);
        } while (!cpus_empty(map));
 
        set_cpus_allowed(current, tmp);
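
cpu_idle_wait() can only observe a CPU once that CPU runs through the idle loop again; a CPU parked in HLT with nothing to do never will, so the new do_nothing() cross-call exists purely to generate the interrupt that knocks it out of HLT. A rough userspace analogue of that kick-a-sleeper pattern (illustrative only; none of these names come from the kernel, and error handling is omitted):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t should_exit;
static volatile sig_atomic_t parked_done;

static void nop_handler(int sig)
{
	(void)sig;		/* the interrupt itself is the point, like do_nothing() */
}

static void *parked(void *unused)
{
	(void)unused;
	while (!should_exit)
		pause();	/* sleeps until a signal arrives, like a CPU in HLT */
	parked_done = 1;
	return NULL;
}

int main(void)
{
	struct sigaction sa = { 0 };
	pthread_t t;

	sa.sa_handler = nop_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	pthread_create(&t, NULL, parked, NULL);
	usleep(100 * 1000);		/* let the thread park itself in pause() */

	should_exit = 1;		/* changing state alone does not wake it up */
	while (!parked_done) {
		pthread_kill(t, SIGUSR1);	/* the kick; retried like the 1 s loop above */
		usleep(100 * 1000);
	}
	pthread_join(t, NULL);
	puts("parked thread woke up and exited");
	return 0;
}

The retry loop in main() plays the same role as the do-while above: if a kick races with the sleeper re-checking its flag, the sleeper simply gets kicked again on the next pass.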
@@ -197,7 +214,7 @@ static inline void play_dead(void)
  * low exit latency (ie sit in a loop waiting for
  * somebody to say that they'd like to reschedule)
  */
-void cpu_idle (void)
+void cpu_idle(void)
 {
        current_thread_info()->status |= TS_POLLING;
        /* endless idle loop with no priority at all */
@@ -208,6 +225,8 @@ void cpu_idle (void)
                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;
 
+                       tick_nohz_stop_sched_tick();
+
                        rmb();
                        idle = pm_idle;
                        if (!idle)
@@ -228,6 +247,7 @@ void cpu_idle (void)
                        __exit_idle();
                }
 
+               tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
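
Together with the tick_nohz_stop_sched_tick() call added earlier, this hunk brackets idle entry and exit so the periodic tick is switched off while the CPU sleeps and switched back on before it returns to schedule(). A loose userspace analogue of that stop-the-tick/restart-the-tick bracketing, using a timerfd as the "tick" (illustrative only, not the kernel mechanism; error handling is omitted):

#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct itimerspec tick = {
		.it_interval = { .tv_sec = 0, .tv_nsec = 4 * 1000 * 1000 },	/* 4 ms period */
		.it_value    = { .tv_sec = 0, .tv_nsec = 4 * 1000 * 1000 },
	};
	struct itimerspec stopped = { 0 };	/* an all-zero spec disarms the timer */
	struct timespec nap = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
	uint64_t expired;
	int tfd = timerfd_create(CLOCK_MONOTONIC, 0);

	timerfd_settime(tfd, 0, &tick, NULL);	/* periodic "tick" is running */
	read(tfd, &expired, sizeof(expired));	/* block until the next tick */

	timerfd_settime(tfd, 0, &stopped, NULL);	/* "stop the tick" before idling */
	nanosleep(&nap, NULL);				/* stand-in for a long idle period */
	timerfd_settime(tfd, 0, &tick, NULL);		/* "restart the tick" before real work */

	read(tfd, &expired, sizeof(expired));
	printf("tick re-armed, %llu expiration(s)\n", (unsigned long long)expired);
	close(tfd);
	return 0;
}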
@@ -579,7 +599,7 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
  *
  * Kprobes not supported here. Set the probe on schedule instead.
  */
-__kprobes struct task_struct *
+struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
        struct thread_struct *prev = &prev_p->thread,