Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
diff --git a/kernel/sched.c b/kernel/sched.c
index 589e55a42214bd527f78ea3e9367c396bae30f05..cfaf3fabeecdf56ebd731070956743df711a89c6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -667,9 +667,13 @@ static int effective_prio(task_t *p)
 /*
  * __activate_task - move a task to the runqueue.
  */
-static inline void __activate_task(task_t *p, runqueue_t *rq)
+static void __activate_task(task_t *p, runqueue_t *rq)
 {
-       enqueue_task(p, rq->active);
+       prio_array_t *target = rq->active;
+
+       if (batch_task(p))
+               target = rq->expired;
+       enqueue_task(p, target);
        rq->nr_running++;
 }
 
@@ -688,7 +692,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
        unsigned long long __sleep_time = now - p->timestamp;
        unsigned long sleep_time;
 
-       if (unlikely(p->policy == SCHED_BATCH))
+       if (batch_task(p))
                sleep_time = 0;
        else {
                if (__sleep_time > NS_MAX_SLEEP_AVG)
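
Both hunks above are the SCHED_BATCH half of this change: a waking batch task is now put on the expired array, so it cannot preempt or queue ahead of interactive work on the active array, and recalc_task_prio() keeps treating it as if it had not slept at all. The open-coded policy test becomes the batch_task() helper (and __activate_task() loses its inline hint now that it carries a branch). I take the helper to be defined in this tree's scheduler headers along these lines, stated as an assumption rather than a quote:

        /* hedged sketch of the helper both hunks rely on */
        #define batch_task(p)   (unlikely((p)->policy == SCHED_BATCH))
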
@@ -700,14 +704,18 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
        if (likely(sleep_time > 0)) {
                /*
                 * User tasks that sleep a long time are categorised as
-                * idle and will get just interactive status to stay active &
-                * prevent them suddenly becoming cpu hogs and starving
-                * other processes.
+                * idle. They will only have their sleep_avg increased to a
+                * level that makes them just interactive priority to stay
+                * active yet prevent them suddenly becoming cpu hogs and
+                * starving other processes.
                 */
-               if (p->mm && p->sleep_type != SLEEP_NONINTERACTIVE &&
-                       sleep_time > INTERACTIVE_SLEEP(p)) {
-                               p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
-                                               DEF_TIMESLICE);
+               if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) {
+                               unsigned long ceiling;
+
+                               ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG -
+                                       DEF_TIMESLICE);
+                               if (p->sleep_avg < ceiling)
+                                       p->sleep_avg = ceiling;
                } else {
                        /*
                         * Tasks waking from uninterruptible sleep are
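
Two things change in the long-sleep branch above. First, the p->sleep_type != SLEEP_NONINTERACTIVE test is dropped, so the ceiling applies to any user task whose sleep exceeds INTERACTIVE_SLEEP(p). Second, the ceiling is no longer written unconditionally: the old code could lower an already higher sleep_avg, while the new code only lifts it, a clamp from below. Read as one expression (a sketch, assuming the ceiling fits the unsigned long type of sleep_avg):

        /* equivalent to the added if/assignment: never reduce sleep_avg,
         * only raise it to the "just interactive" level */
        p->sleep_avg = max(p->sleep_avg,
                           (unsigned long)JIFFIES_TO_NS(MAX_SLEEP_AVG - DEF_TIMESLICE));
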
@@ -3019,8 +3027,7 @@ go_idle:
                        dequeue_task(next, array);
                        next->prio = new_prio;
                        enqueue_task(next, array);
-               } else
-                       requeue_task(next, array);
+               }
        }
        next->sleep_type = SLEEP_NORMAL;
 switch_tasks:
@@ -3879,6 +3886,10 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
                        !capable(CAP_SYS_NICE))
                goto out_unlock;
 
+       retval = security_task_setscheduler(p, 0, NULL);
+       if (retval)
+               goto out_unlock;
+
        cpus_allowed = cpuset_cpus_allowed(p);
        cpus_and(new_mask, new_mask, cpus_allowed);
        retval = set_cpus_allowed(p, new_mask);
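
sched_setaffinity() now asks the LSM layer for permission in addition to the existing euid/CAP_SYS_NICE test. The hook used is the generic set-scheduler one, invoked with no policy or parameter change, hence the extra 0 and NULL arguments; the declaration in this era's include/linux/security.h is, as far as I recall (worth checking against the tree):

        int security_task_setscheduler(struct task_struct *p, int policy,
                                       struct sched_param *lp);

The matching read-side check lands in sched_getaffinity() in the next hunk via security_task_getscheduler(p), replacing the unconditional retval = 0.
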
@@ -3947,7 +3958,10 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
        if (!p)
                goto out_unlock;
 
-       retval = 0;
+       retval = security_task_getscheduler(p);
+       if (retval)
+               goto out_unlock;
+
        cpus_and(*mask, p->cpus_allowed, cpu_online_map);
 
 out_unlock:
@@ -4039,6 +4053,9 @@ asmlinkage long sys_sched_yield(void)
 
 static inline void __cond_resched(void)
 {
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+       __might_sleep(__FILE__, __LINE__);
+#endif
        /*
         * The BKS might be reacquired before we have dropped
         * PREEMPT_ACTIVE, which could trigger a second
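
Calling __might_sleep() at the top of __cond_resched() makes a cond_resched() issued from atomic context (spinlock held, interrupts disabled, ...) trip the usual "sleeping function called from invalid context" warning when CONFIG_DEBUG_SPINLOCK_SLEEP is enabled. It is open-coded behind the #ifdef rather than written as might_sleep(), presumably because that macro also folds in might_resched(), which would recurse straight back into cond_resched() here. Conceptually (a sketch of the kernel.h convention, not the exact text):

        #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
        void __might_sleep(char *file, int line);       /* warns if atomic */
        # define might_sleep() \
                do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
        #else
        # define might_sleep()  do { might_resched(); } while (0)
        #endif
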
@@ -4135,7 +4152,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 
        atomic_inc(&rq->nr_iowait);
        schedule();
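
io_schedule() (and io_schedule_timeout() below) still fetch this CPU's runqueue for the nr_iowait accounting; only the accessor changes. __raw_get_cpu_var(runqueues) reaches the local per-cpu copy directly instead of indexing the per_cpu array by raw_smp_processor_id(), and like the raw form it does not complain about running with preemption nominally enabled, which is presumably acceptable here since a migration in this window merely skews the per-cpu iowait statistic. One plausible generic fallback for the accessor, stated as an assumption rather than the macro text of this tree:

        /* #define __raw_get_cpu_var(var)   per_cpu(var, raw_smp_processor_id()) */
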
@@ -4146,7 +4163,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct runqueue *rq = &__raw_get_cpu_var(runqueues);
        long ret;
 
        atomic_inc(&rq->nr_iowait);
@@ -4230,7 +4247,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
        if (retval)
                goto out_unlock;
 
-       jiffies_to_timespec(p->policy & SCHED_FIFO ?
+       jiffies_to_timespec(p->policy == SCHED_FIFO ?
                                0 : task_timeslice(p), &t);
        read_unlock(&tasklist_lock);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
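
The jiffies_to_timespec() change is a plain bug fix: SCHED_FIFO is a policy value, not a bit mask, so testing it with & matches any policy that shares its low bit. With the values of this era (SCHED_NORMAL 0, SCHED_FIFO 1, SCHED_RR 2, SCHED_BATCH 3), a SCHED_BATCH task would have been reported with a zero timeslice as if it were FIFO. A small self-contained illustration:

        #include <stdio.h>

        /* scheduler policy values per the Linux ABI of this era */
        #define SCHED_NORMAL    0
        #define SCHED_FIFO      1
        #define SCHED_RR        2
        #define SCHED_BATCH     3

        int main(void)
        {
                int policy;

                for (policy = SCHED_NORMAL; policy <= SCHED_BATCH; policy++)
                        printf("policy %d: & SCHED_FIFO -> %d, == SCHED_FIFO -> %d\n",
                               policy,
                               (policy & SCHED_FIFO) ? 1 : 0,
                               (policy == SCHED_FIFO) ? 1 : 0);
                /* policy 3 (SCHED_BATCH) is where the bitwise test goes wrong */
                return 0;
        }
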
@@ -4739,6 +4756,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
+               if (!cpu_rq(cpu)->migration_thread)
+                       break;
                /* Unbind it from offline cpu so it can run.  Fall thru. */
                kthread_bind(cpu_rq(cpu)->migration_thread,
                             any_online_cpu(cpu_online_map));
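
The early break protects CPU_UP_CANCELED against a NULL migration thread: if the CPU failed to come up before this notifier's CPU_UP_PREPARE created the thread (or kthread_create() itself failed there), cpu_rq(cpu)->migration_thread is still NULL and the kthread_bind()/kthread_stop() sequence below would oops. For orientation, the create side earlier in migration_call() looks roughly like this (a sketch from memory, not the exact tree):

        case CPU_UP_PREPARE:
                p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
                if (IS_ERR(p))
                        return NOTIFY_BAD;              /* thread never recorded */
                kthread_bind(p, cpu);
                cpu_rq(cpu)->migration_thread = p;      /* set only on success */
                break;
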
@@ -4781,7 +4800,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 /* Register at highest priority so that task migration (migrate_all_tasks)
  * happens before everything else.
  */
-static struct notifier_block __devinitdata migration_notifier = {
+static struct notifier_block migration_notifier = {
        .notifier_call = migration_call,
        .priority = 10
 };
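
Dropping __devinitdata keeps migration_notifier in ordinary .data. The block stays on the CPU notifier chain for the lifetime of the kernel, and on configurations where __devinitdata degrades to __initdata its storage would be thrown away with the init sections, leaving the chain with a dangling pointer. The init.h convention, as I understand it (an assumption; exact wording varies by tree):

        #ifdef CONFIG_HOTPLUG
        #define __devinitdata                           /* kept after boot */
        #else
        #define __devinitdata   __initdata              /* discarded with init sections */
        #endif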