X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=kernel%2Fsched.c;h=4107db0dc0919767b72b31e8dd88a5580294a578;hb=cc14cf46da215a9df1c0a4388763a68769ef9e53;hp=e2b0d3e4dd067ad4e6df0362f265189ac8d17049;hpb=3bd19078c215d15e20b4447d9c4aa0065813b207;p=linux-2.6

diff --git a/kernel/sched.c b/kernel/sched.c
index e2b0d3e4dd..4107db0dc0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3877,6 +3877,13 @@ asmlinkage long sys_sched_yield(void)
 
 static inline void __cond_resched(void)
 {
+	/*
+	 * The BKS might be reacquired before we have dropped
+	 * PREEMPT_ACTIVE, which could trigger a second
+	 * cond_resched() call.
+	 */
+	if (unlikely(preempt_count()))
+		return;
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		schedule();
@@ -4166,6 +4173,14 @@ void show_state(void)
 	read_unlock(&tasklist_lock);
 }
 
+/**
+ * init_idle - set up an idle thread for a given CPU
+ * @idle: task in question
+ * @cpu: cpu the idle task belongs to
+ *
+ * NOTE: this function does not set the idle thread's NEED_RESCHED
+ * flag, to make booting more robust.
+ */
 void __devinit init_idle(task_t *idle, int cpu)
 {
 	runqueue_t *rq = cpu_rq(cpu);
@@ -4183,7 +4198,6 @@ void __devinit init_idle(task_t *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
 #endif
-	set_tsk_need_resched(idle);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
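
For reference, below is a sketch of how __cond_resched() reads once the first hunk is applied: the new guard returns early when preempt_count() is non-zero, so a cond_resched() triggered while the Big Kernel Semaphore is being reacquired cannot recurse while PREEMPT_ACTIVE is still set. The tail of the loop (the sub_preempt_count() call and the need_resched() loop condition) is not visible in the hunk's context lines and is assumed here from the surrounding 2.6-era kernel source.

/* Sketch of __cond_resched() with the early-return guard applied. */
static inline void __cond_resched(void)
{
	/*
	 * The BKS might be reacquired before we have dropped
	 * PREEMPT_ACTIVE, which could trigger a second
	 * cond_resched() call.
	 */
	if (unlikely(preempt_count()))
		return;
	do {
		add_preempt_count(PREEMPT_ACTIVE);
		schedule();
		/* Assumed loop tail, not shown in the hunk: */
		sub_preempt_count(PREEMPT_ACTIVE);
	} while (need_resched());
}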