[PATCH] vmi: cpu cycles fix

diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 0fd93107ff9a6d3fa75d9ab1438faff039cef537..59222a04234b929cb98b086caaad46f021b259df 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -14,6 +14,7 @@
 #include <asm/delay.h>
 #include <asm/tsc.h>
 #include <asm/io.h>
+#include <asm/timer.h>
 
 #include "mach_timer.h"
 
@@ -60,12 +61,6 @@ static inline int check_tsc_unstable(void)
        return tsc_unstable;
 }
 
-void mark_tsc_unstable(void)
-{
-       tsc_unstable = 1;
-}
-EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-
 /* Accellerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
  *  basic equation:
@@ -108,9 +103,6 @@ unsigned long long sched_clock(void)
 {
        unsigned long long this_offset;
 
-       if (unlikely(custom_sched_clock))
-               return (*custom_sched_clock)();
-
        /*
         * Fall back to jiffies if there's no TSC available:
         */
@@ -119,13 +111,13 @@ unsigned long long sched_clock(void)
                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
        /* read the Time Stamp Counter: */
-       rdtscll(this_offset);
+       get_scheduled_cycles(this_offset);
 
        /* return the value in ns */
        return cycles_2_ns(this_offset);
 }
 
-static unsigned long calculate_cpu_khz(void)
+unsigned long native_calculate_cpu_khz(void)
 {
        unsigned long long start, end;
        unsigned long count;
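
Editor's note: this hunk, together with the removal of the custom_sched_clock pointer above, is the core of the patch. sched_clock() now reads the counter through get_scheduled_cycles() instead of calling rdtscll() directly, and calculate_cpu_khz() is renamed native_calculate_cpu_khz(), so a paravirt backend such as VMI can supply its own cycle counter and CPU frequency. The indirection presumably lives in the newly included <asm/timer.h>; a minimal sketch of how it could resolve (the macro bodies are an assumption, not quoted from the header):

    /* Illustrative sketch of the <asm/timer.h> indirection: with
     * CONFIG_PARAVIRT unset the hooks collapse to the code this file
     * used to call directly; a paravirt build would route them through
     * its own ops table instead. */
    #ifndef CONFIG_PARAVIRT
    #define get_scheduled_cycles(val)       rdtscll(val)
    #define calculate_cpu_khz()             native_calculate_cpu_khz()
    #endif
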
@@ -295,7 +287,6 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
-static int tsc_update_callback(void);
 
 static cycle_t read_tsc(void)
 {
@@ -313,36 +304,28 @@ static struct clocksource clocksource_tsc = {
        .mask                   = CLOCKSOURCE_MASK(64),
        .mult                   = 0, /* to be set */
        .shift                  = 22,
-       .update_callback        = tsc_update_callback,
-       .is_continuous          = 1,
+       .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
+                                 CLOCK_SOURCE_MUST_VERIFY,
 };
 
-static int tsc_update_callback(void)
+void mark_tsc_unstable(void)
 {
-       int change = 0;
-
-       /* check to see if we should switch to the safe clocksource: */
-       if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
-               clocksource_change_rating(&clocksource_tsc, 0);
-               change = 1;
-       }
-
-       /* only update if tsc_khz has changed: */
-       if (current_tsc_khz != tsc_khz) {
-               current_tsc_khz = tsc_khz;
-               clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
-                                                       clocksource_tsc.shift);
-               change = 1;
+       if (!tsc_unstable) {
+               tsc_unstable = 1;
+               /* Can be called before registration */
+               if (clocksource_tsc.mult)
+                       clocksource_change_rating(&clocksource_tsc, 0);
+               else
+                       clocksource_tsc.rating = 0;
        }
-
-       return change;
 }
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
 static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
 {
        printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
                       d->ident);
-       mark_tsc_unstable();
+       tsc_unstable = 1;
        return 0;
 }
 
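
Editor's note: mark_tsc_unstable() reappears here, below the clocksource definition, because it now demotes clocksource_tsc itself instead of leaving that to the deleted tsc_update_callback(). The ordering detail: clocksource_change_rating() is only meaningful once the clocksource has been registered, and a non-zero .mult (filled in by init_tsc_clocksource() below) serves as the "already registered" hint; a caller that runs earlier, while .mult is still 0, only has the rating field zeroed in place. A minimal sketch of a late caller, with a hypothetical function name:

    /* Hypothetical: code that notices after boot that the TSC rate has
     * changed (e.g. a frequency transition) can still demote it; by then
     * clocksource_tsc.mult is non-zero, so clocksource_change_rating()
     * runs and the generic code can reselect a better clocksource. */
    static void example_tsc_rate_changed(void)
    {
            mark_tsc_unstable();
    }
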
@@ -359,49 +342,6 @@ static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
         {}
 };
 
-#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10sec in MS */
-static struct timer_list verify_tsc_freq_timer;
-
-/* XXX - Probably should add locking */
-static void verify_tsc_freq(unsigned long unused)
-{
-       static u64 last_tsc;
-       static unsigned long last_jiffies;
-
-       u64 now_tsc, interval_tsc;
-       unsigned long now_jiffies, interval_jiffies;
-
-
-       if (check_tsc_unstable())
-               return;
-
-       rdtscll(now_tsc);
-       now_jiffies = jiffies;
-
-       if (!last_jiffies) {
-               goto out;
-       }
-
-       interval_jiffies = now_jiffies - last_jiffies;
-       interval_tsc = now_tsc - last_tsc;
-       interval_tsc *= HZ;
-       do_div(interval_tsc, cpu_khz*1000);
-
-       if (interval_tsc < (interval_jiffies * 3 / 4)) {
-               printk("TSC appears to be running slowly. "
-                       "Marking it as unstable\n");
-               mark_tsc_unstable();
-               return;
-       }
-
-out:
-       last_tsc = now_tsc;
-       last_jiffies = now_jiffies;
-       /* set us up to go off on the next interval: */
-       mod_timer(&verify_tsc_freq_timer,
-               jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
-}
-
 /*
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.
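
Editor's note: the deleted verify_tsc_freq() timer is what the CLOCK_SOURCE_MUST_VERIFY flag set earlier replaces; instead of this file re-arming its own 10-second check, the generic clocksource watchdog now cross-checks the TSC and can demote it. For reference, the deleted check boils down to the helper below (restated with a hypothetical name; the real watchdog in kernel/time/clocksource.c compares against a reference clocksource and differs in detail):

    static int tsc_ran_slow(u64 tsc_delta, unsigned long jiffies_delta)
    {
            u64 ticks = tsc_delta * HZ;

            /* how many ticks do the elapsed cycles represent at cpu_khz? */
            do_div(ticks, cpu_khz * 1000);

            /* flag the TSC if it accounts for less than 3/4 of the ticks
             * that actually passed */
            return ticks < jiffies_delta * 3 / 4;
    }
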
@@ -414,12 +354,32 @@ __cpuinit int unsynchronized_tsc(void)
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
         */
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-               return 0;
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+               /* assume multi socket systems are not synchronized: */
+               if (num_possible_cpus() > 1)
+                       tsc_unstable = 1;
+       }
+       return tsc_unstable;
+}
+
+/*
+ * Geode_LX - the OLPC CPU possibly has a very reliable TSC
+ */
+#ifdef CONFIG_MGEODE_LX
+/* RTSC counts during suspend */
+#define RTSC_SUSP 0x100
 
-       /* assume multi socket systems are not synchronized: */
-       return num_possible_cpus() > 1;
+static void __init check_geode_tsc_reliable(void)
+{
+       unsigned long val;
+
+       rdmsrl(MSR_GEODE_BUSCONT_CONF0, val);
+       if ((val & RTSC_SUSP))
+               clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
 }
+#else
+static inline void check_geode_tsc_reliable(void) { }
+#endif
 
 static int __init init_tsc_clocksource(void)
 {
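
Editor's note, two points on the block just added: unsynchronized_tsc() is no longer a pure predicate; it latches tsc_unstable itself for non-Intel multi-socket systems and returns the flag, which is why the caller in the final hunk drops its explicit mark_tsc_unstable() call. And on a Geode LX (the OLPC CPU), the RTSC_SUSP bit of MSR_GEODE_BUSCONT_CONF0 indicates the TSC keeps counting across suspend, so CLOCK_SOURCE_MUST_VERIFY is cleared and the watchdog leaves it alone. Any other platform that trusts its TSC could opt out the same way; an illustrative sketch with a hypothetical name:

    /* Hypothetical: clear the verification request before registration
     * on a platform known to have a reliable, always-running TSC. */
    static void __init example_mark_tsc_reliable(void)
    {
            clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
    }
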
@@ -428,20 +388,16 @@ static int __init init_tsc_clocksource(void)
                /* check blacklist */
                dmi_check_system(bad_tsc_dmi_table);
 
-               if (unsynchronized_tsc()) /* mark unstable if unsynced */
-                       mark_tsc_unstable();
+               unsynchronized_tsc();
+               check_geode_tsc_reliable();
                current_tsc_khz = tsc_khz;
                clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
                                                        clocksource_tsc.shift);
                /* lower the rating if we already know its unstable: */
-               if (check_tsc_unstable())
+               if (check_tsc_unstable()) {
                        clocksource_tsc.rating = 0;
-
-               init_timer(&verify_tsc_freq_timer);
-               verify_tsc_freq_timer.function = verify_tsc_freq;
-               verify_tsc_freq_timer.expires =
-                       jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
-               add_timer(&verify_tsc_freq_timer);
+                       clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
+               }
 
                return clocksource_register(&clocksource_tsc);
        }
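
Editor's note: a worked example of the .mult/.shift pair set up here, with illustrative numbers. clocksource_khz2mult() picks mult so that nanoseconds come out as (cycles * mult) >> shift:

    /* tsc_khz = 2,000,000 (a 2 GHz TSC), shift = 22:
     *   mult = (1,000,000 << 22) / 2,000,000               = 2,097,152
     *   one second of cycles:
     *   (2,000,000,000 * 2,097,152) >> 22                  = 1,000,000,000 ns
     */

The last added lines also clear CLOCK_SOURCE_IS_CONTINUOUS when the TSC is already known to be unstable, which keeps the generic code from treating it as a basis for high-resolution timekeeping.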