/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 * Copyright (c) 2003, 2004  Maciej W. Rozycki
 *
 * Common time service routines for MIPS machines. See
 * Documentation/mips/time.README.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/div64.h>
#include <asm/sections.h>
#include <asm/time.h>

/*
 * The integer part of the number of usecs per jiffy is taken from tick,
 * but the fractional part is not recorded, so we calculate it using the
 * initial value of HZ.  This aids systems where tick isn't really an
 * integer (e.g. for HZ = 128).
 */
#define USECS_PER_JIFFY		TICK_SIZE
#define USECS_PER_JIFFY_FRAC	((unsigned long)(u32)((1000000ULL << 32) / HZ))

#define TICK_SIZE	(tick_nsec / 1000)
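
/*
 * Worked example (an illustrative note, not from the original source):
 * with HZ = 128 a jiffy is 1000000 / 128 = 7812.5 usecs.  The integer
 * part, 7812, comes from tick via TICK_SIZE; the fractional part, 0.5,
 * is kept as a 32-bit binary fraction: USECS_PER_JIFFY_FRAC is
 * (1000000 << 32) / 128 truncated to 32 bits, i.e. 0x80000000 = 0.5 * 2^32.
 */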

extern volatile unsigned long wall_jiffies;

DEFINE_SPINLOCK(rtc_lock);

/*
 * By default we provide the null RTC ops
 */
static unsigned long null_rtc_get_time(void)
{
	return mktime(2000, 1, 1, 0, 0, 0);
}

static int null_rtc_set_time(unsigned long sec)
{
	return 0;
}

unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time;
int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
int (*rtc_mips_set_mmss)(unsigned long);
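
/*
 * A board with a real RTC overrides these hooks from its board_time_init()
 * routine, along the lines of the sketch below (my_rtc_get_time and
 * my_rtc_set_time are hypothetical board routines, not part of this file):
 *
 *	static void my_board_time_init(void)
 *	{
 *		rtc_mips_get_time = my_rtc_get_time;
 *		rtc_mips_set_time = my_rtc_set_time;
 *	}
 */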

/* usecs per counter cycle, shifted to left by 32 bits */
static unsigned int sll32_usecs_per_cycle;

/* how many counter cycles in a jiffy */
static unsigned long cycles_per_jiffy __read_mostly;

/* Cycle counter value at the previous timer interrupt.. */
static unsigned int timerhi, timerlo;

/* expirelo is the count value for next CPU timer interrupt */
static unsigned int expirelo;

/*
 * Null timer ack for systems not needing one (e.g. i8254).
 */
static void null_timer_ack(void) { /* nothing */ }

/*
 * Null high precision timer functions for systems lacking one.
 */
static unsigned int null_hpt_read(void)
{
	return 0;
}

static void null_hpt_init(unsigned int count)
{
	/* nothing */
}

/*
 * Timer ack for an R4k-compatible timer of a known frequency.
 */
static void c0_timer_ack(void)
{
	unsigned int count;

#ifndef CONFIG_SOC_PNX8550	/* pnx8550 resets to zero */
	/* Ack this timer interrupt and set the next one.  */
	expirelo += cycles_per_jiffy;
#endif
	write_c0_compare(expirelo);

	/*
	 * Check to see if we have missed any timer interrupts.  The
	 * unsigned subtraction makes this a modular comparison: it is
	 * true whenever the counter has already passed expirelo.
	 */
	while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
		/* missed_timer_count++; */
		expirelo = count + cycles_per_jiffy;
		write_c0_compare(expirelo);
	}
}

/*
 * High precision timer functions for a R4k-compatible timer.
 */
static unsigned int c0_hpt_read(void)
{
	return read_c0_count();
}

/* For use solely as a high precision timer.  */
static void c0_hpt_init(unsigned int count)
{
	write_c0_count(read_c0_count() - count);
}

/* For use both as a high precision timer and an interrupt source.  */
static void c0_hpt_timer_init(unsigned int count)
{
	count = read_c0_count() - count;
	expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
	write_c0_count(expirelo - cycles_per_jiffy);
	write_c0_compare(expirelo);
	write_c0_count(count);
}

int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);
unsigned int (*mips_hpt_read)(void);
void (*mips_hpt_init)(unsigned int);
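
/*
 * Boards with an off-CPU high precision timer plug in here from their
 * board_time_init() hook, roughly as sketched below (my_hpt_read and
 * my_hpt_init are hypothetical platform routines, and the frequency
 * value is made up for illustration):
 *
 *	static void my_board_time_init(void)
 *	{
 *		mips_hpt_read = my_hpt_read;
 *		mips_hpt_init = my_hpt_init;
 *		mips_hpt_frequency = 40000000;
 *	}
 */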

/*
 * This version of gettimeofday has microsecond resolution and better than
 * microsecond precision on fast machines with cycle counter.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq;
	unsigned long lost;
	unsigned long usec, sec;
	unsigned long max_ntp_tick;

	do {
		seq = read_seqbegin(&xtime_lock);

		usec = do_gettimeoffset();

		lost = jiffies - wall_jiffies;

		/*
		 * If time_adjust is negative then NTP is slowing the clock
		 * so make sure not to go into next possible interval.
		 * Better to lose some accuracy than have time go backwards..
		 */
		if (unlikely(time_adjust < 0)) {
			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
			usec = min(usec, max_ntp_tick);

			if (lost)
				usec += lost * max_ntp_tick;
		} else if (unlikely(lost))
			usec += lost * (USEC_PER_SEC / HZ);

		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / 1000);

	} while (read_seqretry(&xtime_lock, seq));

	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);
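
/*
 * Note on the loop above: read_seqbegin()/read_seqretry() implement a
 * lockless reader.  If a timer interrupt updates xtime while we are
 * sampling it, the sequence number changes and we simply resample;
 * readers never block the writer.
 */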

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	/*
	 * This is revolting.  We need to set "xtime" correctly.  However,
	 * the value in this location is the value at the most recent update
	 * of wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= do_gettimeoffset() * NSEC_PER_USEC;
	nsec -= (jiffies - wall_jiffies) * tick_nsec;

	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();
	write_sequnlock_irq(&xtime_lock);

	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

/*
 * Gettimeoffset routines.  These routines return the time duration
 * since the last timer interrupt in usecs.
 *
 * If the exact CPU counter frequency is known, use fixed_rate_gettimeoffset.
 * Otherwise use one of the calibrate_divXX_gettimeoffset routines.
 *
 * If the CPU does not have the counter register, you can either supply
 * your own gettimeoffset() routine, or use null_gettimeoffset(), which
 * gives the same resolution as HZ.
 */
static unsigned long null_gettimeoffset(void)
{
	return 0;
}

/* The function pointer to one of the gettimeoffset funcs.  */
unsigned long (*do_gettimeoffset)(void) = null_gettimeoffset;

static unsigned long fixed_rate_gettimeoffset(void)
{
	u32 count;
	unsigned long res;

	/* Get last timer tick in absolute kernel time */
	count = mips_hpt_read();

	/* .. relative to previous jiffy (32 bits is enough) */
	count -= timerlo;

	__asm__("multu	%1,%2"
		: "=h" (res)
		: "r" (count), "r" (sll32_usecs_per_cycle)
		: "lo", GCC_REG_ACCUM);

	/*
	 * Due to possible jiffies inconsistencies, we need to check
	 * the result so that we'll get a timer that is monotonic.
	 */
	if (res >= USECS_PER_JIFFY)
		res = USECS_PER_JIFFY - 1;

	return res;
}
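
/*
 * For reference, a plain C sketch of the multu trick used above (kept
 * under #if 0, never built): multiplying the elapsed count by the
 * "usecs per cycle << 32" constant and keeping only the high 32 bits
 * of the 64-bit product yields the elapsed usecs -- a fixed-point
 * multiply that needs no division.
 */
#if 0
static unsigned long fixed_rate_gettimeoffset_c(void)
{
	/* Elapsed counter cycles since the last timer interrupt ... */
	u64 prod = (u64)(mips_hpt_read() - timerlo) * sll32_usecs_per_cycle;
	/* ... times usecs-per-cycle, taking the integer (high) half. */
	unsigned long res = prod >> 32;

	return res < USECS_PER_JIFFY ? res : USECS_PER_JIFFY - 1;
}
#endif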

/*
 * Cached "1/(clocks per usec) * 2^32" value.
 * It has to be recalculated once each jiffy.
 */
static unsigned long cached_quotient;

/* Last jiffy when calibrate_divXX_gettimeoffset() was called.  */
static unsigned long last_jiffies;
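
/*
 * How the calibrated quotient is derived (explanatory note): timerhi:timerlo
 * counts all counter cycles since boot, so (timerhi:timerlo / jiffies) is
 * the average number of cycles per jiffy.  Dividing the 64-bit fixed-point
 * usecs-per-jiffy value (USECS_PER_JIFFY:USECS_PER_JIFFY_FRAC) by that
 * average yields usecs-per-cycle scaled by 2^32 -- the same quantity that
 * sll32_usecs_per_cycle holds when the frequency is known in advance.
 */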

/*
 * This is moved from dec/time.c:do_ioasic_gettimeoffset() by Maciej.
 */
static unsigned long calibrate_div32_gettimeoffset(void)
{
	u32 count;
	unsigned long res, tmp;
	unsigned long quotient;

	tmp = jiffies;

	quotient = cached_quotient;

	if (last_jiffies != tmp) {
		last_jiffies = tmp;
		if (last_jiffies != 0) {
			unsigned long r0;
			do_div64_32(r0, timerhi, timerlo, tmp);
			do_div64_32(quotient, USECS_PER_JIFFY,
				    USECS_PER_JIFFY_FRAC, r0);
			cached_quotient = quotient;
		}
	}

	/* Get last timer tick in absolute kernel time */
	count = mips_hpt_read();

	/* .. relative to previous jiffy (32 bits is enough) */
	count -= timerlo;

	__asm__("multu	%1,%2"
		: "=h" (res)
		: "r" (count), "r" (quotient)
		: "lo", GCC_REG_ACCUM);

	/*
	 * Due to possible jiffies inconsistencies, we need to check
	 * the result so that we'll get a timer that is monotonic.
	 */
	if (res >= USECS_PER_JIFFY)
		res = USECS_PER_JIFFY - 1;

	return res;
}

static unsigned long calibrate_div64_gettimeoffset(void)
{
	u32 count;
	unsigned long res, tmp;
	unsigned long quotient;

	tmp = jiffies;

	quotient = cached_quotient;

	if (last_jiffies != tmp) {
		last_jiffies = tmp;
		if (last_jiffies) {
			unsigned long r0;
			/* Same quotient as above, done with 64-bit (MIPS III)
			   division instructions instead of do_div64_32().  */
			__asm__(".set	push\n\t"
				".set	mips3\n\t"
				"lwu	%0,%3\n\t"
				"dsll32	%1,%2,0\n\t"
				"or	%1,%1,%0\n\t"
				"ddivu	$0,%1,%4\n\t"
				"mflo	%1\n\t"
				"dsll32	%0,%5,0\n\t"
				"or	%0,%0,%6\n\t"
				"ddivu	$0,%0,%1\n\t"
				"mflo	%0\n\t"
				".set	pop"
				: "=&r" (quotient), "=&r" (r0)
				: "r" (timerhi), "m" (timerlo),
				  "r" (tmp), "r" (USECS_PER_JIFFY),
				  "r" (USECS_PER_JIFFY_FRAC)
				: "hi", "lo", GCC_REG_ACCUM);
			cached_quotient = quotient;
		}
	}

	/* Get last timer tick in absolute kernel time */
	count = mips_hpt_read();

	/* .. relative to previous jiffy (32 bits is enough) */
	count -= timerlo;

	__asm__("multu	%1,%2"
		: "=h" (res)
		: "r" (count), "r" (quotient)
		: "lo", GCC_REG_ACCUM);

	/*
	 * Due to possible jiffies inconsistencies, we need to check
	 * the result so that we'll get a timer that is monotonic.
	 */
	if (res >= USECS_PER_JIFFY)
		res = USECS_PER_JIFFY - 1;

	return res;
}

/* last time when xtime and rtc are sync'ed up */
static long last_rtc_update;

/*
 * local_timer_interrupt() does profiling and process accounting
 * on a per-CPU basis.
 *
 * In UP mode, it is invoked from the (global) timer_interrupt.
 *
 * In SMP mode, it might be invoked by a per-CPU timer interrupt, or
 * a broadcast inter-processor interrupt which itself is triggered
 * by the global timer interrupt.
 */
void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	if (current->pid)
		profile_tick(CPU_PROFILING, regs);
	update_process_times(user_mode(regs));
}

/*
 * High-level timer interrupt service routines.  This function
 * is set as irqaction->handler and is invoked through do_IRQ.
 */
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long j;
	unsigned int count;

	write_seqlock(&xtime_lock);

	count = mips_hpt_read();
	mips_timer_ack();

	/* Update timerhi/timerlo for intra-jiffy calibration.  */
	timerhi += count < timerlo;		/* Wrap around */
	timerlo = count;

	/*
	 * call the generic timer interrupt handling
	 */
	do_timer(regs);

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. rtc_mips_set_mmss() has
	 * to be called as close as possible to 500 ms before the new second
	 * starts.
	 */
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (rtc_mips_set_mmss(xtime.tv_sec) == 0) {
			last_rtc_update = xtime.tv_sec;
		} else {
			/* do it again in 60 s */
			last_rtc_update = xtime.tv_sec - 600;
		}
	}

	/*
	 * If jiffies has overflowed in this timer_interrupt, we must
	 * update the timer[hi]/[lo] to make fast gettimeoffset funcs
	 * quotient calc still valid. -arca
	 *
	 * The first timer interrupt comes late as interrupts are
	 * enabled long after timers are initialized.  Therefore the
	 * high precision timer is fast, leading to wrong gettimeoffset()
	 * calculations.  We deal with it by setting it based on the
	 * number of its ticks between the second and the third interrupt.
	 * That is still somewhat imprecise, but it's a good estimate.
	 * --macro
	 */
	j = jiffies;
	if (j < 4) {
		static unsigned int prev_count;
		static int hpt_initialized;

		switch (j) {
		case 0:
			timerhi = timerlo = 0;
			mips_hpt_init(count);
			break;
		case 2:
			prev_count = count;
			break;
		case 3:
			if (!hpt_initialized) {
				unsigned int c3 = 3 * (count - prev_count);

				timerhi = 0;
				timerlo = c3;
				mips_hpt_init(count - c3);
				hpt_initialized = 1;
			}
			break;
		default:
			break;
		}
	}

	write_sequnlock(&xtime_lock);

	/*
	 * In UP mode, we call local_timer_interrupt() to do profiling
	 * and process accounting.
	 *
	 * In SMP mode, local_timer_interrupt() is invoked by appropriate
	 * low-level local timer interrupt handler.
	 */
	local_timer_interrupt(irq, dev_id, regs);

	return IRQ_HANDLED;
}

int null_perf_irq(struct pt_regs *regs)
{
	return 0;
}

int (*perf_irq)(struct pt_regs *regs) = null_perf_irq;

EXPORT_SYMBOL(null_perf_irq);
EXPORT_SYMBOL(perf_irq);
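
/*
 * Profiling subsystems (oprofile, for instance) can redirect perf_irq
 * at their own handler; the default above just ignores the interrupt.
 */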

asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs)
{
	int r2 = cpu_has_mips_r2;

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	/*
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run the
	 * performance counter interrupt handler anyway.
	 */
	if (!r2 || (read_c0_cause() & (1 << 26)))	/* Cause.PCI */
		if (perf_irq(regs))
			goto out;

	/* we keep interrupt disabled all the time */
	if (!r2 || (read_c0_cause() & (1 << 30)))	/* Cause.TI */
		timer_interrupt(irq, NULL, regs);

out:
	irq_exit();
}

asmlinkage void ll_local_timer_interrupt(int irq, struct pt_regs *regs)
{
	irq_enter();
	if (smp_processor_id() != 0)
		kstat_this_cpu.irqs[irq]++;

	/* we keep interrupt disabled all the time */
	local_timer_interrupt(irq, NULL, regs);

	irq_exit();
}

/*
 * time_init() - it does the following things:
 *
 * 1) board_time_init() -
 *	a) (optional) set up RTC routines,
 *	b) (optional) calibrate and set the mips_hpt_frequency
 *	   (only needed if you intend to use fixed_rate_gettimeoffset
 *	   or use the CPU counter as the timer interrupt source)
 * 2) set up xtime based on rtc_mips_get_time().
 * 3) choose an appropriate gettimeoffset routine.
 * 4) calculate a couple of cached variables for later use.
 * 5) board_timer_setup() -
 *	a) (optional) override any choices made above by time_init().
 *	b) machine specific code should set up the timer irqaction.
 *	c) enable the timer interrupt.
 */

void (*board_time_init)(void);
void (*board_timer_setup)(struct irqaction *irq);

unsigned int mips_hpt_frequency;

static struct irqaction timer_irqaction = {
	.handler = timer_interrupt,
	.flags = IRQF_DISABLED,
	.name = "timer",
};

static unsigned int __init calibrate_hpt(void)
{
	u64 frequency;
	u32 hpt_start, hpt_end, hpt_count, hz;

	const int loops = HZ / 10;
	int log_2_loops = 0;
	int i;

	/*
	 * We want to calibrate for 0.1s, but to avoid a 64-bit
	 * division we round the number of loops up to the nearest
	 * power of 2.
	 */
	while (loops > 1 << log_2_loops)
		log_2_loops++;
	i = 1 << log_2_loops;

	/*
	 * Wait for a rising edge of the timer interrupt.
	 */
	while (mips_timer_state());
	while (!mips_timer_state());

	/*
	 * Now see how many high precision timer ticks happen
	 * during the calculated number of periods between timer
	 * interrupts.
	 */
	hpt_start = mips_hpt_read();
	do {
		while (mips_timer_state());
		while (!mips_timer_state());
	} while (--i);
	hpt_end = mips_hpt_read();

	hpt_count = hpt_end - hpt_start;
	hz = HZ;
	frequency = (u64)hpt_count * (u64)hz;

	return frequency >> log_2_loops;
}
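
/*
 * Worked example (an illustrative note, not from the original source):
 * with HZ = 100 we get loops = 10, rounded up to 16 = 2^4 timer periods.
 * If the counter advances by hpt_count over those 16 periods, the rate
 * is hpt_count * HZ / 16 counter ticks per second, and the final shift
 * by log_2_loops stands in for that division.
 */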

void __init time_init(void)
{
	if (board_time_init)
		board_time_init();

	if (!rtc_mips_set_mmss)
		rtc_mips_set_mmss = rtc_mips_set_time;

	xtime.tv_sec = rtc_mips_get_time();
	xtime.tv_nsec = 0;

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	/* Choose appropriate high precision timer routines.  */
	if (!cpu_has_counter && !mips_hpt_read) {
		/* No high precision timer -- sorry.  */
		mips_hpt_read = null_hpt_read;
		mips_hpt_init = null_hpt_init;
	} else if (!mips_hpt_frequency && !mips_timer_state) {
		/* A high precision timer of unknown frequency.  */
		if (!mips_hpt_read) {
			/* No external high precision timer -- use R4k.  */
			mips_hpt_read = c0_hpt_read;
			mips_hpt_init = c0_hpt_init;
		}

		if (cpu_has_mips32r1 || cpu_has_mips32r2 ||
		    (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
		    (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
			/*
			 * We need to calibrate the counter but we don't have
			 * 64-bit division.
			 */
			do_gettimeoffset = calibrate_div32_gettimeoffset;
		else
			/*
			 * We need to calibrate the counter but we *do* have
			 * 64-bit division.
			 */
			do_gettimeoffset = calibrate_div64_gettimeoffset;
	} else {
		/* We know counter frequency.  Or we can get it.  */
		if (!mips_hpt_read) {
			/* No external high precision timer -- use R4k.  */
			mips_hpt_read = c0_hpt_read;

			if (mips_timer_state)
				mips_hpt_init = c0_hpt_init;
			else {
				/* No external timer interrupt -- use R4k.  */
				mips_hpt_init = c0_hpt_timer_init;
				mips_timer_ack = c0_timer_ack;
			}
		}
		if (!mips_hpt_frequency)
			mips_hpt_frequency = calibrate_hpt();

		do_gettimeoffset = fixed_rate_gettimeoffset;

		/* Calculate cache parameters.  */
		cycles_per_jiffy = (mips_hpt_frequency + HZ / 2) / HZ;

		/* sll32_usecs_per_cycle = 10^6 * 2^32 / mips_counter_freq  */
		do_div64_32(sll32_usecs_per_cycle,
			    1000000, mips_hpt_frequency / 2,
			    mips_hpt_frequency);

		/* Report the high precision timer rate for a reference.  */
		printk("Using %u.%03u MHz high precision timer.\n",
		       ((mips_hpt_frequency + 500) / 1000) / 1000,
		       ((mips_hpt_frequency + 500) / 1000) % 1000);
	}

	if (!mips_timer_ack)
		/* No timer interrupt ack (e.g. i8254).  */
		mips_timer_ack = null_timer_ack;

	/* This sets up the high precision timer for the first interrupt.  */
	mips_hpt_init(mips_hpt_read());

	/*
	 * Call board specific timer interrupt setup.
	 *
	 * This pointer must be set up in the machine setup routine.
	 *
	 * Even if a machine chooses to use a low-level timer interrupt,
	 * it still needs to set up the timer_irqaction.  In that case,
	 * it might be better to set timer_irqaction.handler to a null
	 * function so that we are sure the high-level code is not invoked
	 * accidentally.
	 */
	board_timer_setup(&timer_irqaction);
}
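
/*
 * A minimal board_timer_setup() just attaches the irqaction to the
 * platform's timer interrupt line, roughly as sketched below
 * (MY_TIMER_IRQ is a hypothetical, platform-specific irq number):
 *
 *	static void my_board_timer_setup(struct irqaction *irq)
 *	{
 *		setup_irq(MY_TIMER_IRQ, irq);
 *	}
 */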

#define FEBRUARY		2
#define STARTOFTIME		1970
#define SECDAY			86400L
#define SECYR			(SECDAY * 365)
#define leapyear(y)		((!((y) % 4) && ((y) % 100)) || !((y) % 400))
#define days_in_year(y)		(leapyear(y) ? 366 : 365)
#define days_in_month(m)	(month_days[(m) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

void to_tm(unsigned long tim, struct rtc_time *tm)
{
	long hms, day, gday;
	int i;

	gday = day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i - 1;		/* tm_mon starts from 0 to 11 */

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	tm->tm_wday = (gday + 4) % 7;	/* 1970/1/1 was Thursday */
}
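
/*
 * Example (a sanity check, not from the original source): to_tm(0, &tm)
 * yields 1970/01/01 00:00:00 with tm_wday = 4 (Thursday).  Note that
 * tm_year holds the full year (1970), not the years-since-1900 form
 * used by the userland struct tm.
 */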

EXPORT_SYMBOL(rtc_lock);
EXPORT_SYMBOL(to_tm);
EXPORT_SYMBOL(rtc_mips_set_time);
EXPORT_SYMBOL(rtc_mips_get_time);

unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}
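
/*
 * Note: this sched_clock() only advances once per jiffy, so scheduler
 * timestamps have tick resolution here; a finer-grained version would
 * have to be derived from mips_hpt_read() and the counter frequency.
 */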