[PATCH] Time: Use clocksource infrastructure for update_wall_time
diff --git a/kernel/timer.c b/kernel/timer.c
index d355d5a4d5ae608d2572c8f08b018952c7e52050..524c7f638365541d4e477706669dec4698ab6e62 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -146,7 +146,7 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 void fastcall init_timer(struct timer_list *timer)
 {
        timer->entry.next = NULL;
-       timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
+       timer->base = __raw_get_cpu_var(tvec_bases);
 }
 EXPORT_SYMBOL(init_timer);
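
For readers who don't have the per-CPU API in their head, here is a toy
userspace model of why the two forms above name the same object on the
local CPU.  per_cpu() and __raw_get_cpu_var() are the real macro names
from <asm/percpu.h>; the array-based expansion below is a simplifying
assumption, not the kernel's implementation:

#include <stdio.h>

#define NR_CPUS 4
static int this_cpu = 2;		/* stands in for raw_smp_processor_id() */

/* toy expansions: one slot per CPU in a plain array */
#define per_cpu(var, cpu)	((var)[(cpu)])
#define __raw_get_cpu_var(var)	((var)[this_cpu])

static int tvec_bases[NR_CPUS] = { 100, 101, 102, 103 };

int main(void)
{
	/* both forms reach the same slot when cpu == this_cpu */
	printf("%d %d\n", per_cpu(tvec_bases, this_cpu),
			  __raw_get_cpu_var(tvec_bases));
	return 0;
}

The practical difference is that __raw_get_cpu_var() lets the
architecture reach the local copy directly (e.g. via a per-CPU base)
instead of spelling out raw_smp_processor_id(), which is the point of
the one-line change above.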
 
@@ -383,23 +383,19 @@ EXPORT_SYMBOL(del_timer_sync);
 static int cascade(tvec_base_t *base, tvec_t *tv, int index)
 {
        /* cascade all the timers from tv up one level */
-       struct list_head *head, *curr;
+       struct timer_list *timer, *tmp;
+       struct list_head tv_list;
+
+       list_replace_init(tv->vec + index, &tv_list);
 
-       head = tv->vec + index;
-       curr = head->next;
        /*
-        * We are removing _all_ timers from the list, so we don't  have to
-        * detach them individually, just clear the list afterwards.
+        * We are removing _all_ timers from the list, so we
+        * don't have to detach them individually.
         */
-       while (curr != head) {
-               struct timer_list *tmp;
-
-               tmp = list_entry(curr, struct timer_list, entry);
-               BUG_ON(tmp->base != base);
-               curr = curr->next;
-               internal_add_timer(base, tmp);
+       list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
+               BUG_ON(timer->base != base);
+               internal_add_timer(base, timer);
        }
-       INIT_LIST_HEAD(head);
 
        return index;
 }
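
The rewritten cascade() leans on two list.h primitives: list_replace_init(),
which detaches a whole list in O(1), and list_for_each_entry_safe(), which
tolerates the current entry being re-linked by internal_add_timer() inside
the loop body.  Here is a compact userspace model of both; the function and
macro names are the real kernel ones, but the miniature list implementation
is a sketch, not <linux/list.h>:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* the new head takes over old's entries in O(1); old is left empty */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
	if (old->next == old) {			/* source already empty */
		INIT_LIST_HEAD(new);
	} else {
		new->next = old->next;  new->next->prev = new;
		new->prev = old->prev;  new->prev->next = new;
	}
	INIT_LIST_HEAD(old);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* "safe" variant: n caches the successor before the body runs */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = container_of((head)->next, typeof(*pos), member),	\
	     n = container_of(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = container_of(n->member.next, typeof(*n), member))

struct timer_list { struct list_head entry; int id; };

int main(void)
{
	struct list_head slot = LIST_HEAD_INIT(slot), tv_list;
	struct timer_list t[3];
	int i;

	for (i = 0; i < 3; i++) {
		t[i].id = i;
		list_add_tail(&t[i].entry, &slot);
	}

	list_replace_init(&slot, &tv_list);	/* grab the whole slot at once */

	struct timer_list *timer, *tmp;
	list_for_each_entry_safe(timer, tmp, &tv_list, entry)
		printf("cascading timer %d\n", timer->id);
	return 0;
}

The same O(1) take-over is why __run_timers() below can switch from
list_splice_init() to list_replace_init() and drop work_list's
LIST_HEAD_INIT: the destination head is overwritten wholesale, so it no
longer needs prior initialisation.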
@@ -419,10 +415,10 @@ static inline void __run_timers(tvec_base_t *base)
 
        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
-               struct list_head work_list = LIST_HEAD_INIT(work_list);
+               struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;
+
                /*
                 * Cascade timers:
                 */
@@ -431,8 +427,8 @@ static inline void __run_timers(tvec_base_t *base)
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
-               ++base->timer_jiffies; 
-               list_splice_init(base->tv1.vec + index, &work_list);
+               ++base->timer_jiffies;
+               list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
@@ -541,6 +537,22 @@ found:
        }
        spin_unlock(&base->lock);
 
+       /*
+        * It can happen that other CPUs service timer IRQs and increment
+        * jiffies, but we have not yet got a local timer tick to process
+        * the timer wheels.  In that case, the expiry time can be before
+        * jiffies, but since the high-resolution timer here is relative to
+        * jiffies, the default expression when high-resolution timers are
+        * not active,
+        *
+        *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
+        *
+        * would falsely evaluate to true.  If that is the case, just
+        * return jiffies so that we can immediately fire the local timer.
+        */
+       if (time_before(expires, jiffies))
+               return jiffies;
+
        if (time_before(hr_expires, expires))
                return hr_expires;
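
The time_before()/time_after() comparisons above are the kernel's
wrap-safe jiffies helpers.  A small userspace sketch of the idea (the
macro bodies mirror <linux/jiffies.h>, minus the typecheck() guards):

#include <stdio.h>
#include <limits.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)

int main(void)
{
	unsigned long jiffies = ULONG_MAX - 1;	/* counter about to wrap */
	unsigned long expires = jiffies + 5;	/* wraps around to 3 */

	/* a naive compare claims the expiry already passed: */
	printf("naive:       %d\n", expires < jiffies);		/* 1 (wrong) */
	/* the signed-difference trick keeps the ordering correct: */
	printf("time_before: %d\n", time_before(jiffies, expires)); /* 1 (right) */
	return 0;
}

This signed-difference trick is also why the comment above singles out
time_before(MAX_JIFFY_OFFSET + jiffies, expires): once expires falls
behind jiffies, that sum overflows past LONG_MAX, the difference changes
sign and the test misfires, hence the early return.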
 
@@ -780,24 +792,93 @@ u64 current_tick_length(void)
        return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
 }
 
+/* XXX - all of this timekeeping code should be later moved to time.c */
+#include <linux/clocksource.h>
+static struct clocksource *clock; /* pointer to current clocksource */
+static cycle_t last_clock_cycle;  /* cycle value at last update_wall_time */
 /*
- * Using a loop looks inefficient, but "ticks" is
- * usually just one (we shouldn't be losing ticks,
- * we're doing this this way mainly for interrupt
- * latency reasons, not because we think we'll
- * have lots of lost timer ticks
+ * timekeeping_init - Initializes the clocksource and common timekeeping values
  */
-static void update_wall_time(unsigned long ticks)
+void __init timekeeping_init(void)
 {
-       do {
-               ticks--;
+       unsigned long flags;
+
+       write_seqlock_irqsave(&xtime_lock, flags);
+       clock = get_next_clocksource();
+       calculate_clocksource_interval(clock, tick_nsec);
+       last_clock_cycle = read_clocksource(clock);
+       ntp_clear();
+       write_sequnlock_irqrestore(&xtime_lock, flags);
+}
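+
xtime_lock is a seqlock rather than a plain spinlock, so writers such as
timekeeping_init() bump a sequence counter around the update and readers
spin-retry instead of blocking.  A toy single-threaded model of the
protocol (write_seqlock(), read_seqbegin() and read_seqretry() are the
real <linux/seqlock.h> names; the bodies here are illustrative only):

#include <stdio.h>

typedef struct { unsigned seq; } seqlock_t;

static void write_seqlock(seqlock_t *l)   { l->seq++; }	/* odd: writer active */
static void write_sequnlock(seqlock_t *l) { l->seq++; }	/* even: data stable */
static unsigned read_seqbegin(seqlock_t *l) { return l->seq; }
static int read_seqretry(seqlock_t *l, unsigned s)
{
	return (s & 1) || l->seq != s;	/* raced with a writer? try again */
}

static seqlock_t xtime_lock;
static long xtime_sec;

int main(void)
{
	unsigned seq;
	long sec;

	write_seqlock(&xtime_lock);	/* writer side, as in timekeeping_init() */
	xtime_sec = 1146000000;
	write_sequnlock(&xtime_lock);

	do {				/* reader side, as in gettimeofday() paths */
		seq = read_seqbegin(&xtime_lock);
		sec = xtime_sec;
	} while (read_seqretry(&xtime_lock, seq));

	printf("xtime.tv_sec = %ld\n", sec);
	return 0;
}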
+
+
+/*
+ * timekeeping_resume - Resumes the generic timekeeping subsystem.
+ * @dev:       unused
+ *
+ * This is for the generic clocksource timekeeping.
+ * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
+ * still managed by arch specific suspend/resume code.
+ */
+static int timekeeping_resume(struct sys_device *dev)
+{
+       unsigned long flags;
+
+       write_seqlock_irqsave(&xtime_lock, flags);
+       /* re-read the last cycle value, so the suspended interval is not accumulated */
+       last_clock_cycle = read_clocksource(clock);
+       write_sequnlock_irqrestore(&xtime_lock, flags);
+       return 0;
+}
+
+/* sysfs resume/suspend bits for timekeeping */
+static struct sysdev_class timekeeping_sysclass = {
+       .resume         = timekeeping_resume,
+       set_kset_name("timekeeping"),
+};
+
+static struct sys_device device_timer = {
+       .id             = 0,
+       .cls            = &timekeeping_sysclass,
+};
+
+static int __init timekeeping_init_device(void)
+{
+       int error = sysdev_class_register(&timekeeping_sysclass);
+       if (!error)
+               error = sysdev_register(&device_timer);
+       return error;
+}
+
+device_initcall(timekeeping_init_device);
+
+/*
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ * Called from the timer interrupt, must hold a write on xtime_lock.
+ */
+static void update_wall_time(void)
+{
+       cycle_t now, offset;
+
+       now = read_clocksource(clock);
+       offset = (now - last_clock_cycle)&clock->mask;
+
+       /* normally this loop will run just once, however in the
+        * case of lost or late ticks, it will accumulate correctly.
+        */
+       while (offset > clock->interval_cycles) {
+               /* accumulate one interval */
+               last_clock_cycle += clock->interval_cycles;
+               offset -= clock->interval_cycles;
+
                update_wall_time_one_tick();
                if (xtime.tv_nsec >= 1000000000) {
                        xtime.tv_nsec -= 1000000000;
                        xtime.tv_sec++;
                        second_overflow();
                }
-       } while (ticks);
+       }
 }
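
To see what the masked subtraction and the catch-up loop buy, here is a
userspace model with a mock 32-bit cycle counter.  The variable names
follow the patch; the constants (the mask width, interval_cycles = 1000)
are made-up example values:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t cycle_t;

static cycle_t mask = 0xffffffffULL;	/* clock->mask for a 32-bit counter */
static cycle_t interval_cycles = 1000;	/* clock->interval_cycles per tick */
static cycle_t last_clock_cycle;
static unsigned long ticks;

static void update_wall_time(cycle_t now)
{
	/* unsigned subtraction plus the mask survives counter wraparound */
	cycle_t offset = (now - last_clock_cycle) & mask;

	/* accumulate one tick interval per iteration, as in the patch */
	while (offset > interval_cycles) {
		last_clock_cycle += interval_cycles;
		offset -= interval_cycles;
		ticks++;	/* stands in for update_wall_time_one_tick() */
	}
}

int main(void)
{
	last_clock_cycle = 0xfffffc00ULL;	/* 0x400 cycles below the wrap */
	update_wall_time(0x800);		/* 'now' has wrapped past zero */
	/* elapsed = 0x400 + 0x800 = 0xc00 = 3072 cycles -> 3 full ticks */
	printf("ticks accumulated: %lu\n", ticks);
	return 0;
}

Because the loop consumes whole intervals and carries the remainder in
last_clock_cycle, late or lost ticks are caught up on the next call
instead of being dropped, which is what the old ticks-counting loop was
approximating.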
 
 /*
@@ -903,10 +984,8 @@ static inline void update_times(void)
        unsigned long ticks;
 
        ticks = jiffies - wall_jiffies;
-       if (ticks) {
-               wall_jiffies += ticks;
-               update_wall_time(ticks);
-       }
+       wall_jiffies += ticks;
+       update_wall_time();
        calc_load(ticks);
 }
   
@@ -1314,7 +1393,7 @@ static void __devinit migrate_timers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __devinit timer_cpu_notify(struct notifier_block *self, 
+static int timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;