x86: fix TSC clock source calibration error
[linux-2.6] / arch / x86 / kernel / tsc_32.c
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

static int tsc_enabled;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

int tsc_disable;

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
                                "cannot disable TSC.\n");
        return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
        tsc_disable = 1;

        return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
        return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use a khz divisor instead of mhz to keep better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, let's go shopping!"
 */
unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
        cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}
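/*
 * Worked example (illustrative numbers): for a 2 GHz CPU, cpu_khz is
 * 2000000, so cyc2ns_scale = (10^6 << 10) / 2000000 = 512 and
 * ns = (cycles * 512) >> 10 = cycles / 2, i.e. 0.5 ns per cycle,
 * exactly what a 2 GHz clock should give.
 */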

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
        unsigned long long this_offset;

        /*
         * Fall back to jiffies if there's no TSC available:
         * ( But note that we still use it if the TSC is marked
         *   unstable. We do this because unlike Time Of Day,
         *   the scheduler clock tolerates small errors and it's
         *   very important for it to be as fast as the platform
         *   can achieve it. )
         */
        if (unlikely(!tsc_enabled && !tsc_unstable))
                /* No locking but a rare wrong value is not a big deal: */
                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

        /* read the Time Stamp Counter: */
        rdtscll(this_offset);

        /* return the value in ns */
        return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
        __attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
        unsigned long long start, end;
        unsigned long count;
        u64 delta64 = (u64)ULLONG_MAX;
        int i;
        unsigned long flags;

        local_irq_save(flags);

        /* run 3 times to ensure the cache is warm */
        for (i = 0; i < 3; i++) {
                mach_prepare_counter();
                rdtscll(start);
                mach_countup(&count);
                rdtscll(end);
                delta64 = min(delta64, (end - start));
        }
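        /*
         * delta64 now holds the smallest number of TSC cycles counted
         * while the CTC timed a CALIBRATE_TIME_MSEC millisecond interval.
         * Cycles per millisecond is kHz, so after the sanity checks below
         * delta64 / CALIBRATE_TIME_MSEC (rounded) is the CPU frequency
         * in kHz.
         */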
        /*
         * Error: ECTCNEVERSET
         * The CTC wasn't reliable: we got a hit on the very first read,
         * or the CPU was so fast/slow that the quotient wouldn't fit in
         * 32 bits..
         */
        if (count <= 1)
                goto err;

        /* cpu freq too fast: */
        if (delta64 > (1ULL<<32))
                goto err;

        /* cpu freq too slow: */
        if (delta64 <= CALIBRATE_TIME_MSEC)
                goto err;

        delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
        do_div(delta64, CALIBRATE_TIME_MSEC);

        local_irq_restore(flags);
        return (unsigned long)delta64;
err:
        local_irq_restore(flags);
        return 0;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
        unsigned long cpu_khz_old = cpu_khz;

        if (cpu_has_tsc) {
                cpu_khz = calculate_cpu_khz();
                tsc_khz = cpu_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                        cpu_khz_old, cpu_khz);
                return 0;
        } else
                return -ENODEV;
#else
        return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;

        if (!ref_freq) {
                if (!freq->old) {
                        ref_freq = freq->new;
                        return 0;
                }
                ref_freq = freq->old;
                loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
                cpu_khz_ref = cpu_khz;
        }

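        /*
         * Scale loops_per_jiffy on PRECHANGE when the frequency goes up
         * and on POSTCHANGE when it goes down, so the delay calibration
         * is never based on a slower clock than the one actually running:
         * delays may come out too long during a transition, but never
         * too short.
         */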
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        cpu_data(freq->cpu).loops_per_jiffy =
                                cpufreq_scale(loops_per_jiffy_ref,
                                                ref_freq, freq->new);

                if (cpu_khz) {

                        if (num_online_cpus() == 1)
                                cpu_khz = cpufreq_scale(cpu_khz_ref,
                                                ref_freq, freq->new);
                        if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
                                tsc_khz = cpu_khz;
                                set_cyc2ns_scale(cpu_khz);
                                /*
                                 * TSC based sched_clock turns
                                 * to junk w/ cpufreq
                                 */
                                mark_tsc_unstable("cpufreq changes");
                        }
                }
        }

        return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        return cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz = 0;

static cycle_t read_tsc(void)
{
        cycle_t ret;

        rdtscll(ret);

        return ret;
}

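/*
 * The generic timekeeping code converts a TSC delta to nanoseconds as
 * (cycles * mult) >> shift; mult is filled in from tsc_khz in tsc_init()
 * via clocksource_khz2mult().
 */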
static struct clocksource clocksource_tsc = {
        .name                   = "tsc",
        .rating                 = 300,
        .read                   = read_tsc,
        .mask                   = CLOCKSOURCE_MASK(64),
        .mult                   = 0, /* to be set */
        .shift                  = 22,
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
                                  CLOCK_SOURCE_MUST_VERIFY,
};

void mark_tsc_unstable(char *reason)
{
        if (!tsc_unstable) {
                tsc_unstable = 1;
                tsc_enabled = 0;
                printk("Marking TSC unstable due to: %s.\n", reason);
                /* Can be called before registration */
                if (clocksource_tsc.mult)
                        clocksource_change_rating(&clocksource_tsc, 0);
                else
                        clocksource_tsc.rating = 0;
        }
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
                       d->ident);
        tsc_unstable = 1;
        return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
        {
         .callback = dmi_mark_tsc_unstable,
         .ident = "IBM Thinkpad 380XD",
         .matches = {
                     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                     DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
                     },
         },
         {}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
        if (!cpu_has_tsc || tsc_unstable)
                return 1;
        /*
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                /* assume multi socket systems are not synchronized: */
                if (num_possible_cpus() > 1)
                        tsc_unstable = 1;
        }
        return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU has a (possibly) very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

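/*
 * If the RTSC_SUSP bit is set, the Geode's TSC keeps counting during
 * suspend, so it is treated as reliable enough to drop the
 * CLOCK_SOURCE_MUST_VERIFY flag (no clocksource watchdog cross-check).
 */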
static void __init check_geode_tsc_reliable(void)
{
        unsigned long res_low, res_high;

        rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
        if (res_low & RTSC_SUSP)
                clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif


void __init tsc_init(void)
{
        if (!cpu_has_tsc || tsc_disable)
                goto out_no_tsc;

        cpu_khz = calculate_cpu_khz();
        tsc_khz = cpu_khz;

        if (!cpu_khz)
                goto out_no_tsc;

        printk("Detected %lu.%03lu MHz processor.\n",
                                (unsigned long)cpu_khz / 1000,
                                (unsigned long)cpu_khz % 1000);

        set_cyc2ns_scale(cpu_khz);
        use_tsc_delay();

        /* Check and install the TSC clocksource */
        dmi_check_system(bad_tsc_dmi_table);

        unsynchronized_tsc();
        check_geode_tsc_reliable();
        current_tsc_khz = tsc_khz;
        clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
                                                        clocksource_tsc.shift);
        /* lower the rating if we already know it's unstable: */
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
                clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
        } else
                tsc_enabled = 1;

        clocksource_register(&clocksource_tsc);

        return;

out_no_tsc:
        /*
         * Set the tsc_disable flag if there's no TSC support; this
         * makes it a fast flag for the kernel to see whether it
         * should be using the TSC.
         */
        tsc_disable = 1;
}