[KTIME]: Introduce ktime_add_us
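
The helper named in the title is added to include/linux/ktime.h and does not appear in the sched.c hunks below. A sketch of it, assuming the usual ktime convention of building on ktime_add_ns() (reconstructed for illustration, not copied from this page):

/* Add 'usec' microseconds to a ktime_t value; one microsecond
 * is 1000 nanoseconds, so defer to the existing ns helper. */
static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
	return ktime_add_ns(kt, usec * 1000);
}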
diff --git a/kernel/sched.c b/kernel/sched.c
index f8cf78c6af2edfb817cbea270fdcfd8c8a848074..9fbced64bfee004e1275088bb22eb4ac848e1733 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
  *             by Davide Libenzi, preemptible kernel bits by Robert Love.
  *  2003-09-03 Interactivity tuning by Con Kolivas.
  *  2004-04-02 Scheduler domains code by Nick Piggin
+ *  2007-04-15  Work begun on replacing all interactivity tuning with a
+ *              fair scheduling design by Con Kolivas.
+ *  2007-05-05  Load balancing (smp-nice) and other improvements
+ *              by Peter Williams
+ *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
+ *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
  */
 
 #include <linux/mm.h>
@@ -1265,9 +1271,9 @@ static int sched_balance_self(int cpu, int flag)
        struct sched_domain *tmp, *sd = NULL;
 
        for_each_domain(cpu, tmp) {
-               /*
-                * If power savings logic is enabled for a domain, stop there.
-                */
+               /*
+                * If power savings logic is enabled for a domain, stop there.
+                */
                if (tmp->flags & SD_POWERSAVINGS_BALANCE)
                        break;
                if (tmp->flags & flag)
@@ -1350,9 +1356,9 @@ static int wake_idle(int cpu, struct task_struct *p)
                                if (idle_cpu(i))
                                        return i;
                        }
-               }
-               else
+               } else {
                        break;
+               }
        }
        return cpu;
 }
@@ -1702,7 +1708,7 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
                /*
                 * Remove function-return probe instances associated with this
                 * task and put them back on the free list.
-                */
+                */
                kprobe_flush_task(prev);
                put_task_struct(prev);
        }
@@ -3699,74 +3705,85 @@ out:
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
-
-#define        SLEEP_ON_VAR                                    \
-       unsigned long flags;                            \
-       wait_queue_t wait;                              \
-       init_waitqueue_entry(&wait, current);
-
-#define SLEEP_ON_HEAD                                  \
-       spin_lock_irqsave(&q->lock,flags);              \
-       __add_wait_queue(q, &wait);                     \
+static inline void
+sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+{
+       spin_lock_irqsave(&q->lock, *flags);
+       __add_wait_queue(q, wait);
        spin_unlock(&q->lock);
+}
 
-#define        SLEEP_ON_TAIL                                   \
-       spin_lock_irq(&q->lock);                        \
-       __remove_wait_queue(q, &wait);                  \
-       spin_unlock_irqrestore(&q->lock, flags);
+static inline void
+sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+{
+       spin_lock_irq(&q->lock);
+       __remove_wait_queue(q, wait);
+       spin_unlock_irqrestore(&q->lock, *flags);
+}
 
-void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
 {
-       SLEEP_ON_VAR
+       unsigned long flags;
+       wait_queue_t wait;
+
+       init_waitqueue_entry(&wait, current);
 
        current->state = TASK_INTERRUPTIBLE;
 
-       SLEEP_ON_HEAD
+       sleep_on_head(q, &wait, &flags);
        schedule();
-       SLEEP_ON_TAIL
+       sleep_on_tail(q, &wait, &flags);
 }
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long fastcall __sched
+long __sched
 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-       SLEEP_ON_VAR
+       unsigned long flags;
+       wait_queue_t wait;
+
+       init_waitqueue_entry(&wait, current);
 
        current->state = TASK_INTERRUPTIBLE;
 
-       SLEEP_ON_HEAD
+       sleep_on_head(q, &wait, &flags);
        timeout = schedule_timeout(timeout);
-       SLEEP_ON_TAIL
+       sleep_on_tail(q, &wait, &flags);
 
        return timeout;
 }
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
-void fastcall __sched sleep_on(wait_queue_head_t *q)
+void __sched sleep_on(wait_queue_head_t *q)
 {
-       SLEEP_ON_VAR
+       unsigned long flags;
+       wait_queue_t wait;
+
+       init_waitqueue_entry(&wait, current);
 
        current->state = TASK_UNINTERRUPTIBLE;
 
-       SLEEP_ON_HEAD
+       sleep_on_head(q, &wait, &flags);
        schedule();
-       SLEEP_ON_TAIL
+       sleep_on_tail(q, &wait, &flags);
 }
 EXPORT_SYMBOL(sleep_on);
 
-long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-       SLEEP_ON_VAR
+       unsigned long flags;
+       wait_queue_t wait;
+
+       init_waitqueue_entry(&wait, current);
 
        current->state = TASK_UNINTERRUPTIBLE;
 
-       SLEEP_ON_HEAD
+       sleep_on_head(q, &wait, &flags);
        timeout = schedule_timeout(timeout);
-       SLEEP_ON_TAIL
+       sleep_on_tail(q, &wait, &flags);
 
        return timeout;
 }
-
 EXPORT_SYMBOL(sleep_on_timeout);
 
 #ifdef CONFIG_RT_MUTEXES
@@ -5920,6 +5937,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
                sched_group_nodes[i] = sg;
                for_each_cpu_mask(j, nodemask) {
                        struct sched_domain *sd;
+
                        sd = &per_cpu(node_domains, j);
                        sd->groups = sg;
                }
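
The largest hunk above replaces the multi-statement SLEEP_ON_VAR/SLEEP_ON_HEAD/SLEEP_ON_TAIL macros with static inline functions, so q, wait and flags become explicit, type-checked parameters instead of names captured silently from the caller's scope; flags in particular is now passed by pointer because spin_lock_irqsave() must store the saved interrupt state into the caller's variable. A minimal userspace sketch of the same macro-to-inline refactor, using pthreads in place of the kernel's waitqueue and spinlock APIs (all names below are illustrative, not from the patch):

#include <pthread.h>
#include <stdio.h>

/* Macro form, like the removed SLEEP_ON_* macros: 'lock', 'list' and
 * 'nitems' are captured from the caller's scope, nothing is type-checked,
 * and the multi-statement body misbehaves after a bare if (). */
#define ENQUEUE_LOCKED(item)				\
	pthread_mutex_lock(&lock);			\
	list[nitems++] = (item);			\
	pthread_mutex_unlock(&lock);

/* Inline-function form, like the new sleep_on_head()/sleep_on_tail():
 * every dependency is an explicit, type-checked parameter. */
static inline void enqueue_locked(pthread_mutex_t *lock, int *list,
				  int *nitems, int item)
{
	pthread_mutex_lock(lock);
	list[(*nitems)++] = item;
	pthread_mutex_unlock(lock);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	int list[8];
	int nitems = 0;

	ENQUEUE_LOCKED(1);			 /* compiles only because the caller's names match */
	enqueue_locked(&lock, list, &nitems, 2); /* explicit and checkable */

	printf("%d items: %d %d\n", nitems, list[0], list[1]);
	return 0;
}

Both calls do the same work, but only the function form lets the compiler check argument types and lets a debugger step into the body, which is the point of the conversion in the hunk above.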