err.no Git - linux-2.6 / commitdiff
sched: remove sleep_type
author		Ingo Molnar <mingo@elte.hu>
		Mon, 9 Jul 2007 16:51:59 +0000 (18:51 +0200)
committer	Ingo Molnar <mingo@elte.hu>
		Mon, 9 Jul 2007 16:51:59 +0000 (18:51 +0200)
remove the sleep_type heuristics from the core scheduler - scheduling
policy is implemented in the scheduling-policy modules. (And CFS does
not use this type of sleep-type heuristic.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
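
Editor's note: the message above refers to scheduling policy living in
scheduling-policy modules rather than in the core scheduler. As a rough
illustration of that split - a minimal standalone C sketch, not the
kernel's actual sched_class interface; struct sched_policy, the hook
names and the FIFO stub below are all hypothetical - the "core" only
dispatches through a table of hooks, while each policy module keeps its
own ordering logic and any interactivity state it wants, instead of
storing heuristics such as sleep_type in the shared task structure:

	#include <stdio.h>
	#include <stddef.h>

	/* Hypothetical task and policy-module interface, for illustration
	 * only; the real kernel interface has many more hooks and
	 * different signatures. */
	struct task {
		int pid;
		struct task *next;
	};

	struct sched_policy {
		const char *name;
		void (*enqueue)(struct task *p);     /* policy decides ordering */
		struct task *(*pick_next)(void);     /* policy decides who runs */
	};

	/* A trivial FIFO "policy module": no sleep-type heuristics, no
	 * interactivity bonuses. */
	static struct task *fifo_head, *fifo_tail;

	static void fifo_enqueue(struct task *p)
	{
		p->next = NULL;
		if (fifo_tail)
			fifo_tail->next = p;
		else
			fifo_head = p;
		fifo_tail = p;
	}

	static struct task *fifo_pick_next(void)
	{
		struct task *p = fifo_head;

		if (p) {
			fifo_head = p->next;
			if (!fifo_head)
				fifo_tail = NULL;
		}
		return p;
	}

	static const struct sched_policy fifo_policy = {
		.name      = "fifo-demo",
		.enqueue   = fifo_enqueue,
		.pick_next = fifo_pick_next,
	};

	/* The "core scheduler" knows nothing about how the policy ranks
	 * tasks; it only calls through the hook table. */
	int main(void)
	{
		struct task a = { .pid = 1 }, b = { .pid = 2 };
		const struct sched_policy *policy = &fifo_policy;
		struct task *next;

		policy->enqueue(&a);
		policy->enqueue(&b);

		while ((next = policy->pick_next()) != NULL)
			printf("%s picked pid %d\n", policy->name, next->pid);

		return 0;
	}

With that split, removing a heuristic like sleep_type is a core-side
cleanup only, which is what the diff below does: the enum and the
task_struct field go away, along with every place the core consulted
them.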
include/linux/sched.h
kernel/sched.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4dcc61cca00aa5c81a3ec6dcb67d85f526f63a74..be2460e6f55bb1389387b507d81fa5d5f5f5ca50 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -788,13 +788,6 @@ struct mempolicy;
 struct pipe_inode_info;
 struct uts_namespace;
 
-enum sleep_type {
-       SLEEP_NORMAL,
-       SLEEP_NONINTERACTIVE,
-       SLEEP_INTERACTIVE,
-       SLEEP_INTERRUPTED,
-};
-
 struct prio_array;
 struct rq;
 struct sched_domain;
@@ -905,7 +898,6 @@ struct task_struct {
        unsigned long sleep_avg;
        unsigned long long timestamp, last_ran;
        unsigned long long sched_time; /* sched_clock time spent running */
-       enum sleep_type sleep_type;
 
        unsigned int policy;
        cpumask_t cpus_allowed;
diff --git a/kernel/sched.c b/kernel/sched.c
index 6e5a89ba4f76a0e083836ae4a90b4b0c052e2f3b..26795adab3ad9a97538b5f3c39c2bc9acb6387a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -990,32 +990,7 @@ static int recalc_task_prio(struct task_struct *p, unsigned long long now)
                         * with one single large enough sleep.
                         */
                        p->sleep_avg = ceiling;
-                       /*
-                        * Using INTERACTIVE_SLEEP() as a ceiling places a
-                        * nice(0) task 1ms sleep away from promotion, and
-                        * gives it 700ms to round-robin with no chance of
-                        * being demoted.  This is more than generous, so
-                        * mark this sleep as non-interactive to prevent the
-                        * on-runqueue bonus logic from intervening should
-                        * this task not receive cpu immediately.
-                        */
-                       p->sleep_type = SLEEP_NONINTERACTIVE;
                } else {
-                       /*
-                        * Tasks waking from uninterruptible sleep are
-                        * limited in their sleep_avg rise as they
-                        * are likely to be waiting on I/O
-                        */
-                       if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
-                               if (p->sleep_avg >= ceiling)
-                                       sleep_time = 0;
-                               else if (p->sleep_avg + sleep_time >=
-                                        ceiling) {
-                                               p->sleep_avg = ceiling;
-                                               sleep_time = 0;
-                               }
-                       }
-
                        /*
                         * This code gives a bonus to interactive tasks.
                         *
@@ -1069,29 +1044,6 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local)
        }
 
        p->prio = recalc_task_prio(p, now);
-
-       /*
-        * This checks to make sure it's not an uninterruptible task
-        * that is now waking up.
-        */
-       if (p->sleep_type == SLEEP_NORMAL) {
-               /*
-                * Tasks which were woken up by interrupts (ie. hw events)
-                * are most likely of interactive nature. So we give them
-                * the credit of extending their sleep time to the period
-                * of time they spend on the runqueue, waiting for execution
-                * on a CPU, first time around:
-                */
-               if (in_interrupt())
-                       p->sleep_type = SLEEP_INTERRUPTED;
-               else {
-                       /*
-                        * Normal first-time wakeups get a credit too for
-                        * on-runqueue time, but it will be weighted down:
-                        */
-                       p->sleep_type = SLEEP_INTERACTIVE;
-               }
-       }
        p->timestamp = now;
 out:
        __activate_task(p, rq);
@@ -1641,23 +1593,8 @@ out_set_cpu:
 
 out_activate:
 #endif /* CONFIG_SMP */
-       if (old_state == TASK_UNINTERRUPTIBLE) {
+       if (old_state == TASK_UNINTERRUPTIBLE)
                rq->nr_uninterruptible--;
-               /*
-                * Tasks on involuntary sleep don't earn
-                * sleep_avg beyond just interactive state.
-                */
-               p->sleep_type = SLEEP_NONINTERACTIVE;
-       } else
-
-       /*
-        * Tasks that have marked their sleep as noninteractive get
-        * woken up with their sleep average not weighted in an
-        * interactive way.
-        */
-               if (old_state & TASK_NONINTERACTIVE)
-                       p->sleep_type = SLEEP_NONINTERACTIVE;
-
 
        activate_task(p, rq, cpu == this_cpu);
        /*
@@ -3533,12 +3470,6 @@ EXPORT_SYMBOL(sub_preempt_count);
 
 #endif
 
-static inline int interactive_sleep(enum sleep_type sleep_type)
-{
-       return (sleep_type == SLEEP_INTERACTIVE ||
-               sleep_type == SLEEP_INTERRUPTED);
-}
-
 /*
  * schedule() is the main scheduler function.
  */
@@ -3549,7 +3480,7 @@ asmlinkage void __sched schedule(void)
        struct list_head *queue;
        unsigned long long now;
        unsigned long run_time;
-       int cpu, idx, new_prio;
+       int cpu, idx;
        long *switch_count;
        struct rq *rq;
 
@@ -3642,24 +3573,6 @@ need_resched_nonpreemptible:
        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);
 
-       if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
-               unsigned long long delta = now - next->timestamp;
-               if (unlikely((long long)(now - next->timestamp) < 0))
-                       delta = 0;
-
-               if (next->sleep_type == SLEEP_INTERACTIVE)
-                       delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
-
-               array = next->array;
-               new_prio = recalc_task_prio(next, next->timestamp + delta);
-
-               if (unlikely(next->prio != new_prio)) {
-                       dequeue_task(next, array);
-                       next->prio = new_prio;
-                       enqueue_task(next, array);
-               }
-       }
-       next->sleep_type = SLEEP_NORMAL;
 switch_tasks:
        if (next == rq->idle)
                schedstat_inc(rq, sched_goidle);