diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d924c679dfac84bcd027e636cc2f8f3cf3e7a718..cf2cd6ce4cb25ad2bedc59b94205b33b24f8a9e9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -878,7 +878,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_SCHED_HRTICK
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
-       int requeue = rq->curr == p;
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
@@ -899,10 +898,10 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
                 * Don't schedule slices shorter than 10000ns, that just
                 * doesn't make sense. Rely on vruntime for fairness.
                 */
-               if (!requeue)
+               if (rq->curr != p)
                        delta = max(10000LL, delta);
 
-               hrtick_start(rq, delta, requeue);
+               hrtick_start(rq, delta);
        }
 }
 #else /* !CONFIG_SCHED_HRTICK */
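
The 'requeue' flag is gone from hrtick_start(): the caller no longer caches
rq->curr == p up front, and the test moves inline to the only place it is
used. A minimal sketch of the two-argument form this caller now expects (an
illustrative simplification, not the actual kernel/sched.c implementation,
which also has to handle arming the timer on a remote CPU):

	static void hrtick_start(struct rq *rq, u64 delay)
	{
		/* Arm this runqueue's hrtimer to fire 'delay' ns from now.
		 * Sketch only -- assumes the local-CPU case. */
		hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
			      HRTIMER_MODE_REL);
	}
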
@@ -1034,7 +1033,7 @@ static int wake_idle(int cpu, struct task_struct *p)
                        && !task_hot(p, task_rq(p)->clock, sd))) {
                        cpus_and(tmp, sd->span, p->cpus_allowed);
                        cpus_and(tmp, tmp, cpu_active_map);
-                       for_each_cpu_mask(i, tmp) {
+                       for_each_cpu_mask_nr(i, tmp) {
                                if (idle_cpu(i)) {
                                        if (i != task_cpu(p)) {
                                                schedstat_inc(p,
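
The cpumask hunk swaps for_each_cpu_mask() for for_each_cpu_mask_nr(), which
stops scanning at nr_cpu_ids (the number of possible CPUs found at boot)
instead of the compile-time NR_CPUS maximum; the loop body is unchanged.
Roughly (a sketch of the intent, not the real include/linux/cpumask.h macros):

	/* for_each_cpu_mask(i, mask)    -- visits set bits in 0..NR_CPUS-1
	 * for_each_cpu_mask_nr(i, mask) -- visits set bits in 0..nr_cpu_ids-1,
	 *                                  cheaper when NR_CPUS >> nr_cpu_ids */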