libata: Update 'passthru' branch for latest libata

diff --git a/kernel/sched.c b/kernel/sched.c
index 5dadcc6df7dd111b94473877da3b69ae2176094a..deca041fc3645670055502888171811404d249f9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2906,6 +2906,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * @q: the waitqueue
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: is directly passed to the wakeup function
  */
 void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
                                int nr_exclusive, void *key)
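
The @key line added above documents the opaque pointer that __wake_up() forwards, uninterpreted, to each waiter's wakeup callback. As a hedged sketch of where it ends up, modelled on the 2.6-era __wake_up_common() loop rather than quoted from it:

    /* Sketch: @key travels straight through to every wait_queue_t's
     * callback (e.g. default_wake_function()); this layer never reads it. */
    static void sketch_wake_up_common(wait_queue_head_t *q, unsigned int mode,
                                      int nr_exclusive, int sync, void *key)
    {
            struct list_head *tmp, *next;

            list_for_each_safe(tmp, next, &q->task_list) {
                    wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);

                    if (curr->func(curr, mode, sync, key) &&
                        (curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                            break;
            }
    }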
@@ -2928,7 +2929,7 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 }
 
 /**
- * __wake_up - sync- wake up threads blocked on a waitqueue.
+ * __wake_up_sync - wake up threads blocked on a waitqueue.
  * @q: the waitqueue
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
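
The hunk above only repairs the kernel-doc: this comment block sits over __wake_up_sync(), not __wake_up(), and the stray "sync-" fragment belongs in the name. The sync variant hints that the waker is about to block itself, so the woken task need not preempt it. A minimal usage sketch, assuming a made-up waitqueue name:

    /* Hypothetical waitqueue, for illustration only. */
    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

    static void producer_done(void)
    {
            /* Expands to __wake_up_sync(&demo_wq, TASK_INTERRUPTIBLE, 1):
             * wake one exclusive interruptible sleeper, with the "sync"
             * hint that this caller will schedule() shortly anyway. */
            wake_up_interruptible_sync(&demo_wq);
    }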
@@ -3754,19 +3755,22 @@ EXPORT_SYMBOL(cond_resched);
  */
 int cond_resched_lock(spinlock_t * lock)
 {
+       int ret = 0;
+
        if (need_lockbreak(lock)) {
                spin_unlock(lock);
                cpu_relax();
+               ret = 1;
                spin_lock(lock);
        }
        if (need_resched()) {
                _raw_spin_unlock(lock);
                preempt_enable_no_resched();
                __cond_resched();
+               ret = 1;
                spin_lock(lock);
-               return 1;
        }
-       return 0;
+       return ret;
 }
 
 EXPORT_SYMBOL(cond_resched_lock);
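
The point of the new ret variable: cond_resched_lock() now returns 1 whenever it dropped and re-took the lock, whether for a lock-break request or a pending reschedule; previously the lock-break path fell through and the function could return 0 even though the lock had been released. Callers that cache state under the lock depend on exactly this. A hedged sketch of the caller pattern, with invented names:

    struct demo_item *item;         /* hypothetical list element */

    spin_lock(&demo_lock);
restart:
    list_for_each_entry(item, &demo_list, node) {
            process_item(item);     /* hypothetical per-item work */

            /* May release demo_lock to admit waiters or a resched.
             * A return of 1 now reliably means the lock was dropped,
             * so the list cursor must be revalidated. */
            if (cond_resched_lock(&demo_lock))
                    goto restart;
    }
    spin_unlock(&demo_lock);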
@@ -3810,7 +3814,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-       struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 
        atomic_inc(&rq->nr_iowait);
        schedule();
@@ -3821,7 +3825,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-       struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
        long ret;
 
        atomic_inc(&rq->nr_iowait);
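
Both io_schedule hunks are a rename, not a behaviour change: _smp_processor_id() was the old spelling of the unchecked CPU-id accessor, now raw_smp_processor_id(). The checked smp_processor_id() is only valid with preemption disabled (and warns under CONFIG_DEBUG_PREEMPT); here preemption is enabled, and a possibly stale CPU id is harmless because nr_iowait is an approximate counter updated atomically. A hedged sketch of when each accessor fits:

    int cpu;
    struct runqueue *rq;

    /* Checked form: pin to a CPU first. */
    preempt_disable();
    cpu = smp_processor_id();       /* would warn here if preemptible */
    /* ... strictly per-this-CPU work ... */
    preempt_enable();

    /* Unchecked form: any CPU's runqueue will do; the atomic op keeps
     * the counter consistent even if we migrate right after the read. */
    rq = &per_cpu(runqueues, raw_smp_processor_id());
    atomic_inc(&rq->nr_iowait);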
@@ -4242,7 +4246,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
 
        /* No more Mr. Nice Guy. */
        if (dest_cpu == NR_CPUS) {
-               tsk->cpus_allowed = cpuset_cpus_allowed(tsk);
+               cpus_setall(tsk->cpus_allowed);
                dest_cpu = any_online_cpu(tsk->cpus_allowed);
 
                /*
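
The final hunk changes the last-resort path of move_task_off_dead_cpu(): rather than asking the task's cpuset for an allowed mask, it simply opens cpus_allowed to every CPU before picking any online one, guaranteeing the task can be placed somewhere. A hedged sketch of the two cpumask helpers involved, modelled on the 2.6-era API:

    cpumask_t mask;
    int cpu;

    cpus_setall(mask);              /* set every bit: may run anywhere */
    cpu = any_online_cpu(mask);     /* first online CPU in the mask */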