sched: add CFS credits
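
The diff below covers two changes: the first hunk adds the CFS credit lines to the header comment, and the second replaces the SLEEP_ON_VAR/SLEEP_ON_HEAD/SLEEP_ON_TAIL macros with sleep_on_head()/sleep_on_tail() inline helpers and drops the fastcall annotation from the exported sleep_on entry points, keeping the locking sequence the same. For context, here is a minimal caller sketch of the externally unchanged API; demo_wq, demo_consumer() and demo_producer() are hypothetical names for illustration, not taken from this patch:

/*
 * Hypothetical caller sketch, not part of this patch: illustrates the
 * sleep_on API whose implementation is reworked below.
 */
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/kernel.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static void demo_consumer(void)
{
	/* Block until demo_producer() calls wake_up(), or for at most HZ jiffies. */
	long left = sleep_on_timeout(&demo_wq, HZ);

	pr_debug("woken with %ld jiffies of the timeout left\n", left);
}

static void demo_producer(void)
{
	/* Wake every task currently queued on demo_wq. */
	wake_up(&demo_wq);
}

Note that the wait_event*() family is the usual modern replacement for these helpers, since sleep_on() can race with a wakeup that arrives before the sleeping task is queued; this patch only restructures the implementation, it does not change that behaviour.
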
diff --git a/kernel/sched.c b/kernel/sched.c
index ef6b6bb3e0b21084eb1aa43877a01f6bfe993784..9fbced64bfee004e1275088bb22eb4ac848e1733 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
  *             by Davide Libenzi, preemptible kernel bits by Robert Love.
  *  2003-09-03 Interactivity tuning by Con Kolivas.
  *  2004-04-02 Scheduler domains code by Nick Piggin
+ *  2007-04-15  Work begun on replacing all interactivity tuning with a
+ *              fair scheduling design by Con Kolivas.
+ *  2007-05-05  Load balancing (smp-nice) and other improvements
+ *              by Peter Williams
+ *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
+ *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
  */
 
 #include <linux/mm.h>
@@ -3699,74 +3705,85 @@ out:
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
-
-#define        SLEEP_ON_VAR                                    \
-       unsigned long flags;                            \
-       wait_queue_t wait;                              \
-       init_waitqueue_entry(&wait, current);
-
-#define SLEEP_ON_HEAD                                  \
-       spin_lock_irqsave(&q->lock,flags);              \
-       __add_wait_queue(q, &wait);                     \
+static inline void
+sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+{
+       spin_lock_irqsave(&q->lock, *flags);
+       __add_wait_queue(q, wait);
        spin_unlock(&q->lock);
+}
 
-#define        SLEEP_ON_TAIL                                   \
-       spin_lock_irq(&q->lock);                        \
-       __remove_wait_queue(q, &wait);                  \
-       spin_unlock_irqrestore(&q->lock, flags);
+static inline void
+sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+{
+       spin_lock_irq(&q->lock);
+       __remove_wait_queue(q, wait);
+       spin_unlock_irqrestore(&q->lock, *flags);
+}
 
-void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
 {
-       SLEEP_ON_VAR
+       unsigned long flags;
+       wait_queue_t wait;
+
+       init_waitqueue_entry(&wait, current);
 
        current->state = TASK_INTERRUPTIBLE;
 
-       SLEEP_ON_HEAD
+       sleep_on_head(q, &wait, &flags);
        schedule();
-       SLEEP_ON_TAIL
+       sleep_on_tail(q, &wait, &flags);
 }
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long fastcall __sched
+long __sched
 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-       SLEEP_ON_VAR
+       unsigned long flags;
+       wait_queue_t wait;
+
+       init_waitqueue_entry(&wait, current);
 
        current->state = TASK_INTERRUPTIBLE;
 
-       SLEEP_ON_HEAD
+       sleep_on_head(q, &wait, &flags);
        timeout = schedule_timeout(timeout);
-       SLEEP_ON_TAIL
+       sleep_on_tail(q, &wait, &flags);
 
        return timeout;
 }
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
-void fastcall __sched sleep_on(wait_queue_head_t *q)
+void __sched sleep_on(wait_queue_head_t *q)
 {
-       SLEEP_ON_VAR
+       unsigned long flags;
+       wait_queue_t wait;
+
+       init_waitqueue_entry(&wait, current);
 
        current->state = TASK_UNINTERRUPTIBLE;
 
-       SLEEP_ON_HEAD
+       sleep_on_head(q, &wait, &flags);
        schedule();
-       SLEEP_ON_TAIL
+       sleep_on_tail(q, &wait, &flags);
 }
 EXPORT_SYMBOL(sleep_on);
 
-long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-       SLEEP_ON_VAR
+       unsigned long flags;
+       wait_queue_t wait;
+
+       init_waitqueue_entry(&wait, current);
 
        current->state = TASK_UNINTERRUPTIBLE;
 
-       SLEEP_ON_HEAD
+       sleep_on_head(q, &wait, &flags);
        timeout = schedule_timeout(timeout);
-       SLEEP_ON_TAIL
+       sleep_on_tail(q, &wait, &flags);
 
        return timeout;
 }
-
 EXPORT_SYMBOL(sleep_on_timeout);
 
 #ifdef CONFIG_RT_MUTEXES