V4L/DVB (7653): tuner-xc2028: drop the severity of version reporting

diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 911e87d0440dee2216f2cbd72fea4ba077d1dd81..f78777abe769e1e3608b8f751d1c996f05aeea13 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1238,51 +1238,50 @@ void hrtimer_run_pending(void)
 /*
  * Called from hardirq context every jiffy
  */
-static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
-                                    int index)
+void hrtimer_run_queues(void)
 {
        struct rb_node *node;
-       struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       struct hrtimer_clock_base *base;
+       int index, gettime = 1;
 
-       if (!base->first)
+       if (hrtimer_hres_active())
                return;
 
-       if (base->get_softirq_time)
-               base->softirq_time = base->get_softirq_time();
-
-       spin_lock(&cpu_base->lock);
+       for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+               base = &cpu_base->clock_base[index];
 
-       while ((node = base->first)) {
-               struct hrtimer *timer;
-
-               timer = rb_entry(node, struct hrtimer, node);
-               if (base->softirq_time.tv64 <= timer->expires.tv64)
-                       break;
-
-               if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-                       __remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
-                       list_add_tail(&timer->cb_entry,
-                                       &base->cpu_base->cb_pending);
+               if (!base->first)
                        continue;
+
+               if (base->get_softirq_time)
+                       base->softirq_time = base->get_softirq_time();
+               else if (gettime) {
+                       hrtimer_get_softirq_time(cpu_base);
+                       gettime = 0;
                }
 
-               __run_hrtimer(timer);
-       }
-       spin_unlock(&cpu_base->lock);
-}
+               spin_lock(&cpu_base->lock);
 
-void hrtimer_run_queues(void)
-{
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-       int i;
+               while ((node = base->first)) {
+                       struct hrtimer *timer;
 
-       if (hrtimer_hres_active())
-               return;
+                       timer = rb_entry(node, struct hrtimer, node);
+                       if (base->softirq_time.tv64 <= timer->expires.tv64)
+                               break;
 
-       hrtimer_get_softirq_time(cpu_base);
+                       if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
+                               __remove_hrtimer(timer, base,
+                                       HRTIMER_STATE_PENDING, 0);
+                               list_add_tail(&timer->cb_entry,
+                                       &base->cpu_base->cb_pending);
+                               continue;
+                       }
 
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
-               run_hrtimer_queue(cpu_base, i);
+                       __run_hrtimer(timer);
+               }
+               spin_unlock(&cpu_base->lock);
+       }
 }
 
 /*
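
The first hunk folds the old per-index helper run_hrtimer_queue() into hrtimer_run_queues() itself: the function now loops over all clock bases, skips bases with no queued timers, and reads the softirq base time at most once per pass (the gettime flag) instead of unconditionally up front. Below is a minimal userspace sketch of that "fetch the time lazily, at most once per pass" idea; the struct, queue contents and clock used are illustrative stand-ins, not kernel APIs.

/*
 * Userspace sketch (not kernel code): walk every queue, skip empty ones,
 * and read the reference time at most once per pass - and not at all if
 * nothing is queued.
 */
#include <stdio.h>
#include <time.h>

#define NQUEUES 2

struct queue {
	int pending;			/* stand-in for base->first */
};

static struct timespec now;		/* stand-in for cpu_base->softirq_time */

int main(void)
{
	struct queue queues[NQUEUES] = { { .pending = 0 }, { .pending = 3 } };
	int i, gettime = 1;

	for (i = 0; i < NQUEUES; i++) {
		if (!queues[i].pending)
			continue;	/* nothing queued: do not touch the clock */

		if (gettime) {
			/* one clock read per pass, shared by all queues */
			clock_gettime(CLOCK_MONOTONIC, &now);
			gettime = 0;
		}

		printf("queue %d: %d timers pending, now=%ld.%09ld\n",
		       i, queues[i].pending,
		       (long)now.tv_sec, (long)now.tv_nsec);
	}
	return 0;
}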
@@ -1424,7 +1423,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
        int i;
 
        spin_lock_init(&cpu_base->lock);
-       lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);
 
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1465,16 +1463,16 @@ static void migrate_hrtimers(int cpu)
        tick_cancel_sched_timer(cpu);
 
        local_irq_disable();
-       double_spin_lock(&new_base->lock, &old_base->lock,
-                        smp_processor_id() < cpu);
+       spin_lock(&new_base->lock);
+       spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                migrate_hrtimer_list(&old_base->clock_base[i],
                                     &new_base->clock_base[i]);
        }
 
-       double_spin_unlock(&new_base->lock, &old_base->lock,
-                          smp_processor_id() < cpu);
+       spin_unlock(&old_base->lock);
+       spin_unlock(&new_base->lock);
        local_irq_enable();
        put_cpu_var(hrtimer_bases);
 }
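
The last hunk changes how migrate_hrtimers() takes the two per-CPU base locks: instead of double_spin_lock() deriving the acquisition order from the CPU numbers, the code now always locks new_base first and tags the second acquisition with spin_lock_nested(..., SINGLE_DEPTH_NESTING), so lockdep accepts taking two locks of the same class. That is presumably also why the per-base lockdep_set_class() key in init_hrtimers_cpu() can be dropped in the hunk above. A rough userspace analogue of the fixed-order scheme follows, using pthread mutexes; lockdep and the nesting annotation have no userspace equivalent, and all names here are illustrative.

/*
 * Userspace sketch: migrate entries between two per-CPU-like structures
 * while always taking the destination lock first, then the source lock.
 */
#include <pthread.h>
#include <stdio.h>

struct cpu_base {
	pthread_mutex_t lock;
	int ntimers;
};

/*
 * Fixed lock order (destination, then source) replaces the old
 * "order by CPU number" dance done by double_spin_lock().
 */
static void migrate(struct cpu_base *old_base, struct cpu_base *new_base)
{
	pthread_mutex_lock(&new_base->lock);
	pthread_mutex_lock(&old_base->lock);	/* kernel: spin_lock_nested() */

	new_base->ntimers += old_base->ntimers;
	old_base->ntimers = 0;

	pthread_mutex_unlock(&old_base->lock);
	pthread_mutex_unlock(&new_base->lock);
}

int main(void)
{
	struct cpu_base a = { .lock = PTHREAD_MUTEX_INITIALIZER, .ntimers = 5 };
	struct cpu_base b = { .lock = PTHREAD_MUTEX_INITIALIZER, .ntimers = 0 };

	migrate(&a, &b);
	printf("migrated: old=%d new=%d\n", a.ntimers, b.ntimers);
	return 0;
}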