/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
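/*
 * Illustrative note (not part of the original file): readers of xtime take
 * no lock at all; they use the usual seqlock retry pattern,
 *
 *	do {
 *		seq = read_seqbegin(&xtime_lock);
 *		... copy xtime and friends ...
 *	} while (read_seqretry(&xtime_lock, seq));
 *
 * so a concurrent writer only forces readers to retry, never to block.
 */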
/*
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the monotonic
 * time not to jump. We need to add total_sleep_time to wall_to_monotonic
 * to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 *   used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */
static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
	xtime_cache = xtime;
	timespec_add_ns(&xtime_cache, nsec);
}
static struct clocksource *clock; /* pointer to current clocksource */
#ifdef CONFIG_GENERIC_TIME
/**
 * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
 *
 * private function, must hold xtime_lock lock when being
 * called. Returns the number of nanoseconds since the
 * last call to update_wall_time() (adjusted by NTP scaling)
 */
static inline s64 __get_nsec_offset(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 ns_offset;

	/* read clocksource: */
	cycle_now = clocksource_read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert to nanoseconds: */
	ns_offset = cyc2ns(clock, cycle_delta);

	return ns_offset;
}
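/*
 * Illustrative note (not part of the original file): cyc2ns() converts a
 * cycle delta to nanoseconds using the clocksource's fixed point scale,
 * roughly
 *
 *	ns = (cycle_delta * clock->mult) >> clock->shift
 *
 * e.g. a 1 GHz counter with shift = 22 uses mult = 1 << 22, so one cycle
 * maps to exactly one nanosecond.
 */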
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = __get_nsec_offset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

EXPORT_SYMBOL(getnstimeofday);
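/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * needing a nanosecond resolution wall-clock stamp would simply do
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	pr_debug("now: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 */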
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
 */
int do_settimeofday(struct timespec *tv)
{
	unsigned long flags;
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	nsec -= __get_nsec_offset();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
	update_xtime_cache(0);

	clock->error = 0;
	ntp_clear();

	update_vsyscall(&xtime, clock);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
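/*
 * Worked example (illustrative, not part of the original file): with
 * xtime.tv_sec = 1000 and wall_to_monotonic.tv_sec = -990 (monotonic
 * time 10s), setting the time to 2000 in do_settimeofday() gives
 *
 *	wtm_sec = -990 + (1000 - 2000) = -1990
 *
 * so the new xtime plus wall_to_monotonic is still 10s and monotonic
 * time does not jump across the call.
 */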
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static void change_clocksource(void)
{
	struct clocksource *new;
	cycle_t now;
	u64 nsec;

	new = clocksource_get_next();
	if (clock == new)
		return;

	now = clocksource_read(new);
	nsec = __get_nsec_offset();
	timespec_add_ns(&xtime, nsec);

	clock = new;
	clock->cycle_last = now;

	clock->error = 0;
	clock->xtime_nsec = 0;
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

	tick_clock_notify();

	/*
	 * We're holding xtime lock and waking up klogd would deadlock
	 * us on enqueue.  So no printing!
	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
	       clock->name);
	 */
}
#else
static inline void change_clocksource(void) { }
static inline s64 __get_nsec_offset(void) { return 0; }
#endif
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}
/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
	return 0;
}
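/*
 * Hypothetical sketch (not part of the original file): an architecture
 * overrides the weak symbol above with its own RTC access, e.g.
 *
 *	unsigned long read_persistent_clock(void)
 *	{
 *		return my_platform_rtc_read_seconds();
 *	}
 *
 * where my_platform_rtc_read_seconds() is a placeholder for whatever
 * battery backed clock the platform provides.
 */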
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;
	unsigned long sec = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	clock = clocksource_get_next();
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
	clock->cycle_last = clocksource_read(clock);

	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
		-xtime.tv_sec, -xtime.tv_nsec);
	update_xtime_cache(0);
	total_sleep_time = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}
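/*
 * Illustrative note (not part of the original file): right after
 * timekeeping_init(), xtime.tv_sec == sec and wall_to_monotonic.tv_sec
 * == -sec, so xtime + wall_to_monotonic == 0 and monotonic time starts
 * at zero, as described in the comment above wall_to_monotonic.
 */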
/* flag for if timekeeping is suspended */
static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;
/* xtime offset when we went into suspend */
static s64 timekeeping_suspend_nsecs;
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long now = read_persistent_clock();

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (now && (now > timekeeping_suspend_time)) {
		unsigned long sleep_length = now - timekeeping_suspend_time;

		xtime.tv_sec += sleep_length;
		wall_to_monotonic.tv_sec -= sleep_length;
		total_sleep_time += sleep_length;
	}
	/* Make sure that we have the correct xtime reference */
	timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
	update_xtime_cache(0);
	/* re-base the last cycle value */
	clock->cycle_last = clocksource_read(clock);
	clock->error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	timekeeping_suspend_time = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);
	/* Get the current xtime offset */
	timekeeping_suspend_nsecs = __get_nsec_offset();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);

	if (!error)
		error = sysdev_register(&device_timer);

	return error;
}

device_initcall(timekeeping_init_device);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = current_tick_length() >>
		(TICK_LENGTH_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
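/*
 * Worked example (illustrative, not part of the original file): if the
 * remaining error is four times the interval, the final loop in
 * clocksource_bigadjust() needs two right shifts before error <= i, so
 * adj = 2 and both *interval and *offset are scaled by 1 << 2 = 4 for
 * the caller.
 */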
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) <<
			(TICK_LENGTH_SHIFT - clock->shift);
}
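/*
 * Illustrative note (not part of the original file): adding +1 to
 * clock->mult makes every future cycle account for slightly more
 * nanoseconds in cyc2ns(), gently speeding up a clock that has been
 * running slow; -1 does the opposite for a clock running fast.
 */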
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		clock->xtime_nsec += clock->xtime_interval;
		clock->cycle_last += clock->cycle_interval;
		offset -= clock->cycle_interval;

		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		/* accumulate error between NTP and clock interval */
		clock->error += current_tick_length();
		clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(offset);

	/* store full nanoseconds into xtime */
	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

	update_xtime_cache(cyc2ns(clock, offset));

	/* check to see if there is a new clocksource to use */
	change_clocksource();
	update_vsyscall(&xtime, clock);
}
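/*
 * Illustrative note (not part of the original file): clock->xtime_nsec
 * holds nanoseconds in fixed point, shifted left by clock->shift.  With
 * shift = 10 a full second is stored as
 *
 *	(u64)NSEC_PER_SEC << 10 = 1024000000000
 *
 * which is the value the accumulation loop in update_wall_time() compares
 * against before bumping xtime.tv_sec.
 */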
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of system boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	set_normalized_timespec(ts,
		- (wall_to_monotonic.tv_sec + total_sleep_time),
		- wall_to_monotonic.tv_nsec);
}
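/*
 * Worked example (illustrative, not part of the original file): booting
 * at wall time 1000s sets wall_to_monotonic.tv_sec to -1000.  A later
 * 50s suspend adds 50 to xtime, subtracts 50 from wall_to_monotonic
 * (now -1050) and adds 50 to total_sleep_time, so getboottime() still
 * yields -(-1050 + 50) = 1000.
 */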
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	ts->tv_sec += total_sleep_time;
}
unsigned long get_seconds(void)
{
	return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime_cache;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);