X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fsched_fair.c;h=cf2cd6ce4cb25ad2bedc59b94205b33b24f8a9e9;hb=433c5f706856689be25928a99636e724fb3ea7cf;hp=6893b3ed65fe4b6f9d2082ed99a7346e5a370c35;hpb=31656519e132f6612584815f128c83976a9aaaef;p=linux-2.6

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6893b3ed65..cf2cd6ce4c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1003,6 +1003,8 @@ static void yield_task_fair(struct rq *rq)
  * not idle and an idle cpu is available.  The span of cpus to
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
+ * Domains may include CPUs that are not usable for migration,
+ * hence we need to mask them out (cpu_active_map)
  *
  * Returns the CPU we should wake onto.
  */
@@ -1030,7 +1032,8 @@ static int wake_idle(int cpu, struct task_struct *p)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
-			for_each_cpu_mask(i, tmp) {
+			cpus_and(tmp, tmp, cpu_active_map);
+			for_each_cpu_mask_nr(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
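
The patch narrows the set of wake-up candidates: after intersecting the domain span with the task's affinity mask, it also intersects with cpu_active_map so that CPUs in the middle of being hot-unplugged are never picked as wake targets. Below is a minimal userspace sketch (not the kernel implementation) of that masking step, using plain bitmasks in place of cpumask_t; the names pick_idle_cpu, candidate_mask and active_mask, and the toy idle_cpu() stub, are illustrative assumptions only.

#include <stdio.h>

#define NR_CPUS 8

/* Toy stand-in for the kernel's idle_cpu(): pretend CPUs 2 and 5 are idle. */
static int idle_cpu(int cpu)
{
	return cpu == 2 || cpu == 5;
}

/*
 * Hypothetical analogue of the patched loop in wake_idle(): mask the
 * candidates with the active set first, then scan for an idle CPU.
 */
static int pick_idle_cpu(unsigned long candidate_mask, unsigned long active_mask)
{
	/* like cpus_and(tmp, tmp, cpu_active_map) */
	unsigned long tmp = candidate_mask & active_mask;
	int i;

	/* like for_each_cpu_mask_nr(i, tmp) */
	for (i = 0; i < NR_CPUS; i++) {
		if ((tmp & (1UL << i)) && idle_cpu(i))
			return i;
	}
	return -1;	/* no usable idle CPU */
}

int main(void)
{
	unsigned long allowed = 0xffUL;	/* sd->span & p->cpus_allowed */
	unsigned long active  = 0xdfUL;	/* CPU 5 is going offline */

	/* CPU 5 is idle but inactive, so the search settles on CPU 2. */
	printf("chosen cpu: %d\n", pick_idle_cpu(allowed, active));
	return 0;
}

In the sketch, without the active-mask intersection the search would return CPU 5, i.e. it would wake the task onto a CPU that is being removed; masking first makes the result CPU 2, which is the behaviour the patch adds to wake_idle().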