err.no Git - linux-2.6/blobdiff - arch/x86/kernel/tsc_32.c
x86: printk kernel version in WARN_ON and other dump_stack users
[linux-2.6] / arch / x86 / kernel / tsc_32.c
index 9683f1d356013fdf4d5a0ea409483511aeb8d0de..9ebc0dab66b4ced5720818ce600b2dd14ccf0e18 100644 (file)
@@ -131,38 +131,43 @@ unsigned long native_calculate_cpu_khz(void)
 {
        unsigned long long start, end;
        unsigned long count;
-       u64 delta64;
+       u64 delta64 = (u64)ULLONG_MAX;
        int i;
        unsigned long flags;
 
        local_irq_save(flags);
 
-       /* run 3 times to ensure the cache is warm */
+       /* run 3 times to ensure the cache is warm and to get an accurate reading */
        for (i = 0; i < 3; i++) {
                mach_prepare_counter();
                rdtscll(start);
                mach_countup(&count);
                rdtscll(end);
-       }
-       /*
-        * Error: ECTCNEVERSET
-        * The CTC wasn't reliable: we got a hit on the very first read,
-        * or the CPU was so fast/slow that the quotient wouldn't fit in
-        * 32 bits..
-        */
-       if (count <= 1)
-               goto err;
 
-       delta64 = end - start;
+               /*
+                * Error: ECTCNEVERSET
+                * The CTC wasn't reliable: we got a hit on the very first read,
+                * or the CPU was so fast/slow that the quotient wouldn't fit in
+                * 32 bits..
+                */
+               if (count <= 1)
+                       continue;
+
+               /* cpu freq too slow: */
+               if ((end - start) <= CALIBRATE_TIME_MSEC)
+                       continue;
+
+               /*
+                * We want the minimum time of all runs in case one of them
+                * is inaccurate due to SMI or other delay
+                */
+               delta64 = min(delta64, (end - start));
+       }
 
-       /* cpu freq too fast: */
+       /* cpu freq too fast (or every run was bad): */
        if (delta64 > (1ULL<<32))
                goto err;
 
-       /* cpu freq too slow: */
-       if (delta64 <= CALIBRATE_TIME_MSEC)
-               goto err;
-
        delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
        do_div(delta64,CALIBRATE_TIME_MSEC);
 
@@ -181,8 +186,8 @@ int recalibrate_cpu_khz(void)
        if (cpu_has_tsc) {
                cpu_khz = calculate_cpu_khz();
                tsc_khz = cpu_khz;
-               cpu_data[0].loops_per_jiffy =
-                       cpufreq_scale(cpu_data[0].loops_per_jiffy,
+               cpu_data(0).loops_per_jiffy =
+                       cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                        cpu_khz_old, cpu_khz);
                return 0;
        } else
@@ -215,7 +220,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
                        return 0;
                }
                ref_freq = freq->old;
-               loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+               loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
                cpu_khz_ref = cpu_khz;
        }
 
@@ -223,7 +228,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-                       cpu_data[freq->cpu].loops_per_jiffy =
+                       cpu_data(freq->cpu).loops_per_jiffy =
                                cpufreq_scale(loops_per_jiffy_ref,
                                                ref_freq, freq->new);