/*
 * Kernel internal timers, kernel timekeeping, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Moved sys_sysinfo here and made its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>
#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
typedef struct tvec_s {
        struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
        struct list_head vec[TVR_SIZE];
} tvec_root_t;
struct tvec_t_base_s {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        tvec_root_t tv1;
        tvec_t tv2;
        tvec_t tv3;
        tvec_t tv4;
        tvec_t tv5;
} ____cacheline_aligned_in_smp;
typedef struct tvec_t_base_s tvec_base_t;

tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
static inline void set_running_timer(tvec_base_t *base,
                                        struct timer_list *timer)
{
#ifdef CONFIG_SMP
        base->running_timer = timer;
#endif
}
static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past.
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
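
/*
 * Worked example of the bucketing above (illustrative; assumes the
 * !CONFIG_BASE_SMALL values TVR_BITS == 8, TVN_BITS == 6):
 *
 *      idx < 2^8   -> tv1, slot  expires        & 255
 *      idx < 2^14  -> tv2, slot (expires >>  8) & 63
 *      idx < 2^20  -> tv3, slot (expires >> 14) & 63
 *      idx < 2^26  -> tv4, slot (expires >> 20) & 63
 *      otherwise   -> tv5, slot (expires >> 26) & 63
 *
 * E.g. with base->timer_jiffies == 1000 and timer->expires == 2000,
 * idx == 1000, so the timer lands in tv2, slot (2000 >> 8) & 63 == 7.
 */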
/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be called on a timer before *any* of the
 * other timer functions are used on it.
 */
void fastcall init_timer(struct timer_list *timer)
{
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
}
EXPORT_SYMBOL(init_timer);
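
/*
 * Typical usage (illustrative sketch; my_dev and my_timeout() are
 * hypothetical, not part of this file):
 *
 *      static void my_timeout(unsigned long data)
 *      {
 *              struct my_dev *dev = (struct my_dev *)data;
 *              ...
 *      }
 *
 *      init_timer(&dev->timer);
 *      dev->timer.function = my_timeout;
 *      dev->timer.data = (unsigned long)dev;
 *      dev->timer.expires = jiffies + HZ;
 *      add_timer(&dev->timer);
 */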
static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)
{
        struct list_head *entry = &timer->entry;

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
{
        tvec_base_t *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
        tvec_base_t *base, *new_base;
        unsigned long flags;
        int ret = 0;

        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
        }

        new_base = __get_cpu_var(tvec_bases);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not yet finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer->base = NULL;
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer->base = base;
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

EXPORT_SYMBOL(__mod_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        tvec_base_t *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer->base = base;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
}
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        BUG_ON(!timer->function);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
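
/*
 * Example (illustrative; dev and its watchdog timer are hypothetical):
 * kicking a watchdog from a fast path. mod_timer() both updates a pending
 * timer and re-activates an expired one, so the caller need not check the
 * timer's state first:
 *
 *      mod_timer(&dev->watchdog, jiffies + 2 * HZ);
 */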
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        tvec_base_t *base;
        unsigned long flags;
        int ret = 0;

        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}

EXPORT_SYMBOL(del_timer);
/*
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        tvec_base_t *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
        }
}

EXPORT_SYMBOL(del_timer_sync);
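
/*
 * Example of the synchronization rules above (illustrative; the names are
 * hypothetical). On a teardown path, first prevent re-arming, then wait
 * for a possibly running handler before freeing:
 *
 *      dev->shutting_down = 1;         <- handler checks this before re-arming
 *      del_timer_sync(&dev->timer);    <- timer dequeued, handler finished
 *      kfree(dev);                     <- now safe
 */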
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(timer->base != base);
                internal_add_timer(base, timer);
        }

        return index;
}
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
static inline void __run_timers(tvec_base_t *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_entry(head->next, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);
                        spin_unlock_irq(&base->lock);
                        {
                                int preempt_count = preempt_count();
                                fn(data);
                                if (preempt_count != preempt_count()) {
                                        printk(KERN_WARNING "huh, entered %p "
                                               "with preempt_count %08x, exited"
                                               " with %08x?\n",
                                               fn, preempt_count,
                                               preempt_count());
                                        BUG();
                                }
                        }
                        spin_lock_irq(&base->lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->lock);
}
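
/*
 * Note that the handler runs with base->lock dropped, so a handler may
 * itself call mod_timer()/add_timer(). The preempt_count comparison above
 * catches handlers that return with a changed preemption state. An
 * illustrative buggy handler (hypothetical):
 *
 *      static void bad_timeout(unsigned long data)
 *      {
 *              spin_lock(&my_lock);
 *              return;
 *      }
 *
 * With CONFIG_PREEMPT enabled, the missing spin_unlock() leaves
 * preempt_count() elevated, and __run_timers() prints the warning and BUGs.
 */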
#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
        tvec_base_t *base;
        struct list_head *list;
        struct timer_list *nte;
        unsigned long expires;
        unsigned long hr_expires = MAX_JIFFY_OFFSET;
        ktime_t hr_delta;
        tvec_t *varray[4];
        int i, j;

        hr_delta = hrtimer_get_next_event();
        if (hr_delta.tv64 != KTIME_MAX) {
                struct timespec tsdelta;
                tsdelta = ktime_to_timespec(hr_delta);
                hr_expires = timespec_to_jiffies(&tsdelta);
                if (hr_expires < 3)
                        return hr_expires + jiffies;
        }
        hr_expires += jiffies;

        base = __get_cpu_var(tvec_bases);
        spin_lock(&base->lock);
        expires = base->timer_jiffies + (LONG_MAX >> 1);
        list = NULL;

        /* Look for timer events in tv1. */
        j = base->timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + j, entry) {
                        expires = nte->expires;
                        if (j < (base->timer_jiffies & TVR_MASK))
                                list = base->tv2.vec + (INDEX(0));
                        goto found;
                }
                j = (j + 1) & TVR_MASK;
        } while (j != (base->timer_jiffies & TVR_MASK));

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;
        for (i = 0; i < 4; i++) {
                j = INDEX(i);
                do {
                        if (list_empty(varray[i]->vec + j)) {
                                j = (j + 1) & TVN_MASK;
                                continue;
                        }
                        list_for_each_entry(nte, varray[i]->vec + j, entry)
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        if (j < (INDEX(i)) && i < 3)
                                list = varray[i + 1]->vec + (INDEX(i + 1));
                        goto found;
                } while (j != (INDEX(i)));
        }
found:
        if (list) {
                /*
                 * The search wrapped. We need to look at the next list
                 * from next tv element that would cascade into tv element
                 * where we found the timer element.
                 */
                list_for_each_entry(nte, list, entry) {
                        if (time_before(nte->expires, expires))
                                expires = nte->expires;
                }
        }
        spin_unlock(&base->lock);

        /*
         * It can happen that other CPUs service timer IRQs and increment
         * jiffies, but we have not yet got a local timer tick to process
         * the timer wheels. In that case, the expiry time can be before
         * jiffies, but since the high-resolution timer here is relative to
         * jiffies, the default expression when high-resolution timers are
         * not active,
         *
         *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
         *
         * would falsely evaluate to true. If that is the case, just
         * return jiffies so that we can immediately fire the local timer
         * hardware.
         */
        if (time_before(expires, jiffies))
                return jiffies;

        if (time_before(hr_expires, expires))
                return hr_expires;

        return expires;
}
#endif
/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;            /* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;            /* ACTHZ period (nsec) */
/*
 * The current time:
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffie times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);
/* Don't completely fail for HZ > 500. */
int tickadj = 500/HZ ? : 1;             /* microsecs */
/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;               /* clock synchronization status */
int time_status = STA_UNSYNC;           /* clock status bits */
long time_offset;                       /* time adjustment (us) */
long time_constant = 2;                 /* pll time constant */
long time_tolerance = MAXFREQ;          /* frequency tolerance (ppm) */
long time_precision = 1;                /* clock precision (us) */
long time_maxerror = NTP_PHASE_LIMIT;   /* maximum error (us) */
long time_esterror = NTP_PHASE_LIMIT;   /* estimated error (us) */
static long time_phase;                 /* phase offset (scaled us) */
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
                                        /* frequency offset (scaled ppm) */
static long time_adj;                   /* tick adjust (scaled 1 / HZ) */
long time_reftime;                      /* time at last adjustment (s) */
long time_adjust;
long time_next_adjust;
/*
 * This routine handles the overflow of the microsecond field.
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
        long ltemp;

        /* Bump the maxerror field */
        time_maxerror += time_tolerance >> SHIFT_USEC;
        if (time_maxerror > NTP_PHASE_LIMIT) {
                time_maxerror = NTP_PHASE_LIMIT;
                time_status |= STA_UNSYNC;
        }

        /*
         * Leap second processing. If in leap-insert state at the end of the
         * day, the system clock is set back one second; if in leap-delete
         * state, the system clock is set ahead one second. The microtime()
         * routine or external clock driver will ensure that reported time is
         * always monotonic. The ugly divides should be replaced.
         */
        switch (time_state) {
        case TIME_OK:
                if (time_status & STA_INS)
                        time_state = TIME_INS;
                else if (time_status & STA_DEL)
                        time_state = TIME_DEL;
                break;
        case TIME_INS:
                if (xtime.tv_sec % 86400 == 0) {
                        xtime.tv_sec--;
                        wall_to_monotonic.tv_sec++;
                        /*
                         * The timer interpolator will make time change
                         * gradually instead of an immediate jump by one second
                         */
                        time_interpolator_update(-NSEC_PER_SEC);
                        time_state = TIME_OOP;
                        clock_was_set();
                        printk(KERN_NOTICE "Clock: inserting leap second "
                                        "23:59:60 UTC\n");
                }
                break;
        case TIME_DEL:
                if ((xtime.tv_sec + 1) % 86400 == 0) {
                        xtime.tv_sec++;
                        wall_to_monotonic.tv_sec--;
                        /*
                         * Use of time interpolator for a gradual change of
                         * time
                         */
                        time_interpolator_update(NSEC_PER_SEC);
                        time_state = TIME_WAIT;
                        clock_was_set();
                        printk(KERN_NOTICE "Clock: deleting leap second "
                                        "23:59:59 UTC\n");
                }
                break;
        case TIME_OOP:
                time_state = TIME_WAIT;
                break;
        case TIME_WAIT:
                if (!(time_status & (STA_INS | STA_DEL)))
                        time_state = TIME_OK;
        }
        /*
         * Compute the phase adjustment for the next second. In PLL mode, the
         * offset is reduced by a fixed factor times the time constant. In FLL
         * mode the offset is used directly. In either mode, the maximum phase
         * adjustment for each second is clamped so as to spread the adjustment
         * over not more than the number of seconds between updates.
         */
        ltemp = time_offset;
        if (!(time_status & STA_FLL))
                ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
        ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
        ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
        time_offset -= ltemp;
        time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
        /*
         * Compute the frequency estimate and additional phase adjustment due
         * to frequency error for the next second.
         */
        ltemp = time_freq;
        time_adj += shift_right(ltemp, (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));

#if HZ == 100
        /*
         * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
         * get 128.125; => only 0.125% error (p. 14)
         */
        time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
#endif
#if HZ == 250
        /*
         * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
         * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
         */
        time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
#if HZ == 1000
        /*
         * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
         * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
         */
        time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
}
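
/*
 * Worked example for the HZ == 100 case above (illustrative): time_adj is
 * computed as if HZ were 1 << SHIFT_HZ == 128, so it must be scaled by
 * 128/100 == 1.28. The two shifts approximate that without a divide:
 *
 *      time_adj * (1 + 1/4 + 1/32) == time_adj * 1.28125
 *
 * i.e. 100 * 1.28125 == 128.125 instead of 128, which is the 0.125% error
 * quoted in the comment.
 */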
/*
 * Returns how many microseconds we need to add to xtime this tick
 * in doing an adjustment requested with adjtime.
 */
static long adjtime_adjustment(void)
{
        long time_adjust_step;

        time_adjust_step = time_adjust;
        if (time_adjust_step) {
                /*
                 * We are doing an adjtime thing. Prepare time_adjust_step to
                 * be within bounds. Note that a positive time_adjust means we
                 * want the clock to run faster.
                 *
                 * Limit the amount of the step to be in the range
                 * -tickadj .. +tickadj
                 */
                time_adjust_step = min(time_adjust_step, (long)tickadj);
                time_adjust_step = max(time_adjust_step, (long)-tickadj);
        }
        return time_adjust_step;
}
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
        long time_adjust_step, delta_nsec;

        time_adjust_step = adjtime_adjustment();
        if (time_adjust_step)
                /* Reduce by this step the amount of time left */
                time_adjust -= time_adjust_step;
        delta_nsec = tick_nsec + time_adjust_step * 1000;
        /*
         * Advance the phase, once it gets to one microsecond, then
         * advance the tick more.
         */
        time_phase += time_adj;
        if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
                long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
                time_phase -= ltemp << (SHIFT_SCALE - 10);
                delta_nsec += ltemp;
        }
        xtime.tv_nsec += delta_nsec;
        time_interpolator_update(delta_nsec);

        /* Changes by adjtime() do not take effect till next tick. */
        if (time_next_adjust != 0) {
                time_adjust = time_next_adjust;
                time_next_adjust = 0;
        }
}
/*
 * Return how long ticks are at the moment, that is, how much time
 * update_wall_time_one_tick will add to xtime next time we call it
 * (assuming no calls to do_adjtimex in the meantime).
 * The return value is in fixed-point nanoseconds shifted by the
 * specified number of bits to the right of the binary point.
 * This function has no side-effects.
 */
u64 current_tick_length(long shift)
{
        long delta_nsec;
        u64 ret;

        /* calculate the finest interval NTP will allow.
         *    ie: nanosecond value shifted by (SHIFT_SCALE - 10)
         */
        delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
        ret = ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;

        /* convert from (SHIFT_SCALE - 10) to specified shift scale: */
        shift = shift - (SHIFT_SCALE - 10);
        if (shift < 0)
                ret >>= -shift;
        else
                ret <<= shift;

        return ret;
}
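
/*
 * Illustrative note on the conversion above: ret is built with
 * (SHIFT_SCALE - 10) fractional bits. A caller asking for exactly
 * shift == SHIFT_SCALE - 10 gets ret back unchanged; asking for fewer
 * fractional bits right-shifts by the difference, and asking for more
 * left-shifts by the difference.
 */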
/* XXX - all of this timekeeping code should be later moved to time.c */
#include <linux/clocksource.h>
static struct clocksource *clock; /* pointer to current clocksource */
static cycle_t last_clock_cycle; /* cycle value at last update_wall_time */

/**
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        unsigned long flags;

        write_seqlock_irqsave(&xtime_lock, flags);
        clock = get_next_clocksource();
        calculate_clocksource_interval(clock, tick_nsec);
        last_clock_cycle = read_clocksource(clock);
        write_sequnlock_irqrestore(&xtime_lock, flags);
}
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev: unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
        unsigned long flags;

        write_seqlock_irqsave(&xtime_lock, flags);
        /* restart the last cycle value */
        last_clock_cycle = read_clocksource(clock);
        write_sequnlock_irqrestore(&xtime_lock, flags);
        return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
        .resume         = timekeeping_resume,
        set_kset_name("timekeeping"),
};

static struct sys_device device_timer = {
        .id             = 0,
        .cls            = &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
        int error = sysdev_class_register(&timekeeping_sysclass);
        if (!error)
                error = sysdev_register(&device_timer);
        return error;
}

device_initcall(timekeeping_init_device);
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
        cycle_t now, offset;

        now = read_clocksource(clock);
        offset = (now - last_clock_cycle) & clock->mask;

        /* normally this loop will run just once, however in the
         * case of lost or late ticks, it will accumulate correctly.
         */
        while (offset > clock->interval_cycles) {
                /* accumulate one interval */
                last_clock_cycle += clock->interval_cycles;
                offset -= clock->interval_cycles;

                update_wall_time_one_tick();
                if (xtime.tv_nsec >= 1000000000) {
                        xtime.tv_nsec -= 1000000000;
                        xtime.tv_sec++;
                        second_overflow();
                }
        }
}
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        if (user_tick)
                account_user_time(p, jiffies_to_cputime(1));
        else
                account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
        scheduler_tick();
        run_posix_cpu_timers(p);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);
/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (count < 0) {
                count += LOAD_FREQ;
                active_tasks = count_active_tasks();
                CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                CALC_LOAD(avenrun[2], EXP_15, active_tasks);
        }
}
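
/*
 * avenrun[] holds the load averages as fixed-point numbers with FSHIFT
 * fractional bits (FIXED_1 == 1 << FSHIFT). An illustrative decode, in
 * the style of the LOAD_INT()/LOAD_FRAC() macros from sched.h:
 *
 *      whole      = avenrun[0] >> FSHIFT;
 *      hundredths = ((avenrun[0] & (FIXED_1 - 1)) * 100) >> FSHIFT;
 *
 * so avenrun[0] == 3 * FIXED_1 / 2 reads back as "1.50".
 */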
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(xtime_lock);
#endif
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        tvec_base_t *base = __get_cpu_var(tvec_bases);

        hrtimer_run_queues();
        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        raise_softirq(TIMER_SOFTIRQ);
}
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
        unsigned long ticks;

        ticks = jiffies - wall_jiffies;
        wall_jiffies += ticks;
        update_wall_time();
        calc_load(ticks);
}
/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(struct pt_regs *regs)
{
        jiffies_64++;
        /* prevent loading jiffies before storing new jiffies_64 value. */
        barrier();
        update_times();
        softlockup_tick(regs);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
        return alarm_setitimer(seconds);
}

#endif
#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
        return current->tgid;
}
/*
 * Accessing ->group_leader->real_parent is not SMP-safe, it could
 * change from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer (it was and remains a dereferenceable kernel pointer
 * no matter what): we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * NOTE2: ->group_leader never changes from under us.
 */
asmlinkage long sys_getppid(void)
{
        int pid;
        struct task_struct *me = current;
        struct task_struct *parent;

        parent = me->group_leader->real_parent;
        for (;;) {
                pid = parent->tgid;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
{
                struct task_struct *old = parent;

                /*
                 * Make sure we read the pid before re-reading the
                 * parent pointer:
                 */
                smp_rmb();
                parent = me->group_leader->real_parent;
                if (old != parent)
                        continue;
}
#endif
                break;
        }
        return pid;
}
asmlinkage long sys_getuid(void)
{
        /* Only we change this so SMP safe */
        return current->uid;
}

asmlinkage long sys_geteuid(void)
{
        /* Only we change this so SMP safe */
        return current->euid;
}

asmlinkage long sys_getgid(void)
{
        /* Only we change this so SMP safe */
        return current->gid;
}

asmlinkage long sys_getegid(void)
{
        /* Only we change this so SMP safe */
        return current->egid;
}

#endif
static void process_timeout(unsigned long __data)
{
        wake_up_process((task_t *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * This special case is useful for keeping the caller
                 * simple. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
                 * but I'd like to return a valid offset (>= 0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx from %p\n", timeout,
                                __builtin_return_address(0));
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire);
        schedule();
        del_singleshot_timer_sync(&timer);

        timeout = expire - jiffies;

 out:
        return timeout < 0 ? 0 : timeout;
}

EXPORT_SYMBOL(schedule_timeout);
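
/*
 * Typical usage (illustrative sketch; "condition" stands for whatever the
 * caller is waiting on):
 *
 *      signed long t = HZ;
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      while (!condition && t && !signal_pending(current)) {
 *              t = schedule_timeout(t);
 *              set_current_state(TASK_INTERRUPTIBLE);
 *      }
 *      __set_current_state(TASK_RUNNING);
 *
 * On exit, t holds the jiffies left, or 0 if the wait timed out.
 */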
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
        return current->pid;
}
/**
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
        struct sysinfo val;
        unsigned long mem_total, sav_total;
        unsigned int mem_unit, bitcount;
        unsigned long seq;

        memset((char *)&val, 0, sizeof(struct sysinfo));

        do {
                struct timespec tp;
                seq = read_seqbegin(&xtime_lock);

                /*
                 * This is annoying. The below is the same thing
                 * posix_get_clock_monotonic() does, but it wants to
                 * take the lock which we want to cover the loads stuff
                 * not the lock covered stuff.
                 */
                getnstimeofday(&tp);
                tp.tv_sec += wall_to_monotonic.tv_sec;
                tp.tv_nsec += wall_to_monotonic.tv_nsec;
                if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
                        tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
                        tp.tv_sec++;
                }
                val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

                val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
                val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
                val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

                val.procs = nr_threads;
        } while (read_seqretry(&xtime_lock, seq));

        si_meminfo(&val);
        si_swapinfo(&val);

        /*
         * If the sum of all the available memory (i.e. ram + swap)
         * is less than can be stored in a 32 bit unsigned long then
         * we can be binary compatible with 2.2.x kernels. If not,
         * well, in that case 2.2.x was broken anyways...
         *
         *  -Erik Andersen <andersee@debian.org>
         */

        mem_total = val.totalram + val.totalswap;
        if (mem_total < val.totalram || mem_total < val.totalswap)
                goto out;
        bitcount = 0;
        mem_unit = val.mem_unit;
        while (mem_unit > 1) {
                bitcount++;
                mem_unit >>= 1;
                sav_total = mem_total;
                mem_total <<= 1;
                if (mem_total < sav_total)
                        goto out;
        }

        /*
         * If mem_total did not overflow, multiply all memory values by
         * val.mem_unit and set it to 1. This leaves things compatible
         * with 2.2.x, and also retains compatibility with earlier 2.4.x
         * kernels...
         */

        val.mem_unit = 1;
        val.totalram <<= bitcount;
        val.freeram <<= bitcount;
        val.sharedram <<= bitcount;
        val.bufferram <<= bitcount;
        val.totalswap <<= bitcount;
        val.freeswap <<= bitcount;
        val.totalhigh <<= bitcount;
        val.freehigh <<= bitcount;

 out:
        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
                return -EFAULT;

        return 0;
}
static int __devinit init_timers_cpu(int cpu)
{
        int j;
        tvec_base_t *base;
        static char __devinitdata tvec_base_done[NR_CPUS];

        if (!tvec_base_done[cpu]) {
                static char boot_done;

                if (boot_done) {
                        /*
                         * The APs use this path later in boot
                         */
                        base = kmalloc_node(sizeof(*base), GFP_KERNEL,
                                                cpu_to_node(cpu));
                        if (!base)
                                return -ENOMEM;
                        memset(base, 0, sizeof(*base));
                        per_cpu(tvec_bases, cpu) = base;
                } else {
                        /*
                         * This is for the boot CPU - we use compile-time
                         * static initialisation because per-cpu memory isn't
                         * ready yet and because the memory allocators are not
                         * initialised either.
                         */
                        boot_done = 1;
                        base = &boot_tvec_bases;
                }
                tvec_base_done[cpu] = 1;
        } else {
                base = per_cpu(tvec_bases, cpu);
        }

        spin_lock_init(&base->lock);
        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
                INIT_LIST_HEAD(base->tv4.vec + j);
                INIT_LIST_HEAD(base->tv3.vec + j);
                INIT_LIST_HEAD(base->tv2.vec + j);
        }
        for (j = 0; j < TVR_SIZE; j++)
                INIT_LIST_HEAD(base->tv1.vec + j);

        base->timer_jiffies = jiffies;
        return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
        struct timer_list *timer;

        while (!list_empty(head)) {
                timer = list_entry(head->next, struct timer_list, entry);
                detach_timer(timer, 0);
                timer->base = new_base;
                internal_add_timer(new_base, timer);
        }
}

static void __devinit migrate_timers(int cpu)
{
        tvec_base_t *old_base;
        tvec_base_t *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = per_cpu(tvec_bases, cpu);
        new_base = get_cpu_var(tvec_bases);

        local_irq_disable();
        spin_lock(&new_base->lock);
        spin_lock(&old_base->lock);

        BUG_ON(old_base->running_timer);

        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        spin_unlock(&old_base->lock);
        spin_unlock(&new_base->lock);
        local_irq_enable();
        put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
                if (init_timers_cpu(cpu) < 0)
                        return NOTIFY_BAD;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block timers_nb = {
        .notifier_call  = timer_cpu_notify,
};

void __init init_timers(void)
{
        timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                                (void *)(long)smp_processor_id());
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline u64 time_interpolator_get_cycles(unsigned int src)
{
        unsigned long (*x)(void);

        switch (src) {
        case TIME_SOURCE_FUNCTION:
                x = time_interpolator->addr;
                return x();

        case TIME_SOURCE_MMIO64:
                return readq_relaxed((void __iomem *)time_interpolator->addr);

        case TIME_SOURCE_MMIO32:
                return readl_relaxed((void __iomem *)time_interpolator->addr);

        default:
                return get_cycles();
        }
}
static inline u64 time_interpolator_get_counter(int writelock)
{
        unsigned int src = time_interpolator->source;

        if (time_interpolator->jitter) {
                u64 lcycle;
                u64 now;

                do {
                        lcycle = time_interpolator->last_cycle;
                        now = time_interpolator_get_cycles(src);
                        if (lcycle && time_after(lcycle, now))
                                return lcycle;

                        /* When holding the xtime write lock, there's no need
                         * to add the overhead of the cmpxchg. Readers are
                         * forced to retry until the write lock is released.
                         */
                        if (writelock) {
                                time_interpolator->last_cycle = now;
                                return now;
                        }
                        /* Keep track of the last timer value returned. The
                         * use of cmpxchg here will cause contention in an
                         * SMP environment.
                         */
                } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
                return now;
        } else
                return time_interpolator_get_cycles(src);
}
void time_interpolator_reset(void)
{
        time_interpolator->offset = 0;
        time_interpolator->last_counter = time_interpolator_get_counter(1);
}

#define GET_TI_NSECS(count, i) \
        (((((count) - (i)->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
unsigned long time_interpolator_get_offset(void)
{
        /* If we do not have a time interpolator set up then just return zero */
        if (!time_interpolator)
                return 0;

        return time_interpolator->offset +
                GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}
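
/*
 * GET_TI_NSECS() converts a raw counter delta to nanoseconds with a
 * multiply and a shift instead of a divide. Illustrative numbers (not
 * from this file): a 10 MHz interpolator registered with shift = 16 gets
 * nsec_per_cyc = (NSEC_PER_SEC << 16) / 10000000 = 100 << 16, so a delta
 * of 5 cycles yields (5 * (100 << 16)) >> 16 = 500 ns.
 */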
#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP (10 * INTERPOLATOR_ADJUST)

static void time_interpolator_update(long delta_nsec)
{
        u64 counter;
        unsigned long offset;

        /* If there is no time interpolator set up then do nothing */
        if (!time_interpolator)
                return;

        /*
         * The interpolator compensates for late ticks by accumulating the late
         * time in time_interpolator->offset. A tick earlier than expected will
         * lead to a reset of the offset and a corresponding jump of the clock
         * forward. Again this only works if the interpolator clock is running
         * slightly slower than the regular clock and the tuning logic ensures
         * that.
         */

        counter = time_interpolator_get_counter(1);
        offset = time_interpolator->offset +
                        GET_TI_NSECS(counter, time_interpolator);

        if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
                time_interpolator->offset = offset - delta_nsec;
        else {
                time_interpolator->skips++;
                time_interpolator->ns_skipped += delta_nsec - offset;
                time_interpolator->offset = 0;
        }
        time_interpolator->last_counter = counter;

        /* The interpolator's tuning logic is invoked every minute or so.
         * Decrease interpolator clock speed if no skips occurred and an
         * offset is carried.
         * Increase interpolator clock speed if we skip too much time.
         */
        if (jiffies % INTERPOLATOR_ADJUST == 0) {
                if (time_interpolator->skips == 0 &&
                                time_interpolator->offset > tick_nsec)
                        time_interpolator->nsec_per_cyc--;
                if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP &&
                                time_interpolator->offset == 0)
                        time_interpolator->nsec_per_cyc++;
                time_interpolator->skips = 0;
                time_interpolator->ns_skipped = 0;
        }
}
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
        if (!time_interpolator)
                return 1;
        return new->frequency > 2 * time_interpolator->frequency ||
            (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}
void
register_time_interpolator(struct time_interpolator *ti)
{
        unsigned long flags;

        /* Sanity check */
        BUG_ON(ti->frequency == 0 || ti->mask == 0);

        ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
        spin_lock(&time_interpolator_lock);
        write_seqlock_irqsave(&xtime_lock, flags);
        if (is_better_time_interpolator(ti)) {
                time_interpolator = ti;
                time_interpolator_reset();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);

        ti->next = time_interpolator_list;
        time_interpolator_list = ti;
        spin_unlock(&time_interpolator_lock);
}
void
unregister_time_interpolator(struct time_interpolator *ti)
{
        struct time_interpolator *curr, **prev;
        unsigned long flags;

        spin_lock(&time_interpolator_lock);
        prev = &time_interpolator_list;
        for (curr = *prev; curr; curr = curr->next) {
                if (curr == ti) {
                        *prev = curr->next;
                        break;
                }
                prev = &curr->next;
        }

        write_seqlock_irqsave(&xtime_lock, flags);
        if (ti == time_interpolator) {
                /* we lost the best time-interpolator: */
                time_interpolator = NULL;
                /* find the next-best interpolator */
                for (curr = time_interpolator_list; curr; curr = curr->next)
                        if (is_better_time_interpolator(curr))
                                time_interpolator = curr;
                time_interpolator_reset();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);
        spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
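
/*
 * Usage contrast (illustrative): a poll loop that must not be cut short
 * uses msleep(), while a wait that should honour signals uses
 * msleep_interruptible() and checks the remainder:
 *
 *      left = msleep_interruptible(1000);
 *      if (left)
 *              return -EINTR;
 *
 * Here a nonzero "left" means a signal arrived with that many
 * milliseconds of the sleep still outstanding.
 */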