err.no Git - linux-2.6/blobdiff - kernel/sched_rt.c
sched: fix rq->clock warps on frequency changes
[linux-2.6] / kernel / sched_rt.c
index 1144bf55669d01c571d3d332df086efa39cc5d99..2dac5ebb8bcb186c90ae781336b31148532f2e10 100644 (file)
@@ -94,8 +94,11 @@ static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
        struct sched_rt_entity *rt_se = rt_rq->rt_se;
 
        if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
+               struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
+
                enqueue_rt_entity(rt_se);
-               resched_task(rq_of_rt_rq(rt_rq)->curr);
+               if (rt_rq->highest_prio < curr->prio)
+                       resched_task(curr);
        }
 }
 
@@ -175,7 +178,11 @@ static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
        ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
 
        if (rt_rq->rt_time > ratio) {
+               struct rq *rq = rq_of_rt_rq(rt_rq);
+
+               rq->rt_throttled = 1;
                rt_rq->rt_throttled = 1;
+
                sched_rt_ratio_dequeue(rt_rq);
                return 1;
        }
@@ -183,18 +190,6 @@ static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
        return 0;
 }
 
-static void __update_sched_rt_period(struct rt_rq *rt_rq, u64 period)
-{
-       unsigned long rt_ratio = sched_rt_ratio(rt_rq);
-       u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
-
-       rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
-       if (rt_rq->rt_throttled) {
-               rt_rq->rt_throttled = 0;
-               sched_rt_ratio_enqueue(rt_rq);
-       }
-}
-
 static void update_sched_rt_period(struct rq *rq)
 {
        struct rt_rq *rt_rq;
@@ -204,8 +199,18 @@ static void update_sched_rt_period(struct rq *rq)
                period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
                rq->rt_period_expire += period;
 
-               for_each_leaf_rt_rq(rt_rq, rq)
-                       __update_sched_rt_period(rt_rq, period);
+               for_each_leaf_rt_rq(rt_rq, rq) {
+                       unsigned long rt_ratio = sched_rt_ratio(rt_rq);
+                       u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+
+                       rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
+                       if (rt_rq->rt_throttled) {
+                               rt_rq->rt_throttled = 0;
+                               sched_rt_ratio_enqueue(rt_rq);
+                       }
+               }
+
+               rq->rt_throttled = 0;
        }
 }
 
@@ -1120,13 +1125,7 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 
                p->rt.timeout++;
                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
-               if (next > p->rt.timeout) {
-                       u64 next_time = p->se.sum_exec_runtime;
-
-                       next_time += next * (NSEC_PER_SEC/HZ);
-                       if (p->it_sched_expires > next_time)
-                               p->it_sched_expires = next_time;
-               } else
+               if (p->rt.timeout > next)
                        p->it_sched_expires = p->se.sum_exec_runtime;
        }
 }