sched: clean up wakeup balancing, move wake_affine()
author    Ingo Molnar <mingo@elte.hu>
          Sun, 16 Mar 2008 19:36:10 +0000 (20:36 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Wed, 19 Mar 2008 03:27:52 +0000 (04:27 +0100)
Split out the affine-wakeup bits.

No code changed:

kernel/sched.o:

   text    data     bss     dec     hex filename
  42521    2858     232   45611    b22b sched.o.before
  42521    2858     232   45611    b22b sched.o.after

md5:
   9d76738f1272aa82f0b7affd2f51df6b  sched.o.before.asm
   09b31c44e9aff8666f72773dc433e2df  sched.o.after.asm

(The md5s changed because stack slots changed and gcc schedules some
registers in a different order - but otherwise the before and after
assembly is instruction-for-instruction equivalent.)
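
A comparison like the one above can be reproduced with a short command
sequence. This is a hedged sketch of one plausible workflow - the exact
commands are an assumption, only the file names quoted above come from
the commit:

    cp kernel/sched.o sched.o.before                 # keep a pre-patch copy
    objdump -d sched.o.before > sched.o.before.asm   # disassemble it
    # ... apply the patch, rebuild kernel/sched.o ...
    cp kernel/sched.o sched.o.after
    objdump -d sched.o.after > sched.o.after.asm
    size sched.o.before sched.o.after                # the text/data/bss table above
    md5sum sched.o.before.asm sched.o.after.asm      # the md5 lines above

When the hashes differ, a plain diff of the two .asm files is the usual
follow-up to confirm that only register numbers and stack offsets moved.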

Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f2cc59080efa4f9ae1e12aa30afe2e3fe08a86f9..70679b26669368a560eabdb755a9080bd8e1dc10 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -980,12 +980,59 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 #endif
 
 #ifdef CONFIG_SMP
+
+static int
+wake_affine(struct rq *rq, struct sched_domain *this_sd, struct task_struct *p,
+           int cpu, int this_cpu, int sync, int idx,
+           unsigned long load, unsigned long this_load,
+           unsigned int imbalance)
+{
+       unsigned long tl = this_load;
+       unsigned long tl_per_task;
+
+       if (!(this_sd->flags & SD_WAKE_AFFINE))
+               return 0;
+
+       /*
+        * Attract cache-cold tasks on sync wakeups:
+        */
+       if (sync && !task_hot(p, rq->clock, this_sd))
+               return 1;
+
+       schedstat_inc(p, se.nr_wakeups_affine_attempts);
+       tl_per_task = cpu_avg_load_per_task(this_cpu);
+
+       /*
+        * If sync wakeup then subtract the (maximum possible)
+        * effect of the currently running task from the load
+        * of the current CPU:
+        */
+       if (sync)
+               tl -= current->se.load.weight;
+
+       if ((tl <= load && tl + target_load(cpu, idx) <= tl_per_task) ||
+                       100*(tl + p->se.load.weight) <= imbalance*load) {
+               /*
+                * This domain has SD_WAKE_AFFINE and
+                * p is cache cold in this domain, and
+                * there is no bad imbalance.
+                */
+               schedstat_inc(this_sd, ttwu_move_affine);
+               schedstat_inc(p, se.nr_wakeups_affine);
+
+               return 1;
+       }
+       return 0;
+}
+
 static int select_task_rq_fair(struct task_struct *p, int sync)
 {
-       int cpu, this_cpu;
-       struct rq *rq;
        struct sched_domain *sd, *this_sd = NULL;
-       int new_cpu;
+       unsigned long load, this_load;
+       int cpu, this_cpu, new_cpu;
+       unsigned int imbalance;
+       struct rq *rq;
+       int idx;
 
        cpu      = task_cpu(p);
        rq       = task_rq(p);
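
Aside: the heuristic that the new wake_affine() helper encapsulates is
pure integer arithmetic on load figures. The stand-alone user-space C
sketch below models only that decision - the function name, simplified
signature, and sample numbers are illustrative assumptions, and the sync
adjustment, task_hot() shortcut, and schedstats accounting of the real
helper (visible in the hunk above) are omitted:

    #include <stdio.h>

    /*
     * Simplified model of the affine-wakeup test: wake on this_cpu when
     * either the waking CPU is no busier than the task's previous CPU
     * and the combined load stays within the average load per task, or
     * the weighted imbalance between the two stays inside the margin.
     */
    static int wake_affine_model(unsigned long load,        /* prev CPU load    */
                                 unsigned long this_load,   /* waking CPU load  */
                                 unsigned long target,      /* target_load(cpu) */
                                 unsigned long tl_per_task, /* avg load/task    */
                                 unsigned long p_weight,    /* task's se weight */
                                 unsigned int imbalance)    /* percent margin   */
    {
            unsigned long tl = this_load;

            if ((tl <= load && tl + target <= tl_per_task) ||
                100 * (tl + p_weight) <= (unsigned long)imbalance * load)
                    return 1;       /* pull the task to the waking CPU */

            return 0;               /* leave it on its previous CPU */
    }

    int main(void)
    {
            /* Hypothetical figures; 1024 is the NICE_0 task weight. */
            printf("%d\n", wake_affine_model(2048, 1024, 2048, 1024, 1024, 112));
            return 0;
    }

With these sample numbers the first clause fails (1024 + 2048 > 1024)
but the second holds (100 * 2048 = 204800 <= 112 * 2048 = 229376), so
the model reports an affine wakeup.
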
@@ -1008,66 +1055,35 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
        /*
         * Check for affine wakeup and passive balancing possibilities.
         */
-       if (this_sd) {
-               int idx = this_sd->wake_idx;
-               unsigned int imbalance;
-               unsigned long load, this_load;
-
-               imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
-
-               load = source_load(cpu, idx);
-               this_load = target_load(this_cpu, idx);
-
-               new_cpu = this_cpu; /* Wake to this CPU if we can */
-
-               if (this_sd->flags & SD_WAKE_AFFINE) {
-                       unsigned long tl = this_load;
-                       unsigned long tl_per_task;
-
-                       /*
-                        * Attract cache-cold tasks on sync wakeups:
-                        */
-                       if (sync && !task_hot(p, rq->clock, this_sd))
-                               goto out_set_cpu;
-
-                       schedstat_inc(p, se.nr_wakeups_affine_attempts);
-                       tl_per_task = cpu_avg_load_per_task(this_cpu);
-
-                       /*
-                        * If sync wakeup then subtract the (maximum possible)
-                        * effect of the currently running task from the load
-                        * of the current CPU:
-                        */
-                       if (sync)
-                               tl -= current->se.load.weight;
-
-                       if ((tl <= load &&
-                               tl + target_load(cpu, idx) <= tl_per_task) ||
-                              100*(tl + p->se.load.weight) <= imbalance*load) {
-                               /*
-                                * This domain has SD_WAKE_AFFINE and
-                                * p is cache cold in this domain, and
-                                * there is no bad imbalance.
-                                */
-                               schedstat_inc(this_sd, ttwu_move_affine);
-                               schedstat_inc(p, se.nr_wakeups_affine);
-                               goto out_set_cpu;
-                       }
-               }
+       if (!this_sd)
+               goto out_keep_cpu;
 
-               /*
-                * Start passive balancing when half the imbalance_pct
-                * limit is reached.
-                */
-               if (this_sd->flags & SD_WAKE_BALANCE) {
-                       if (imbalance*this_load <= 100*load) {
-                               schedstat_inc(this_sd, ttwu_move_balance);
-                               schedstat_inc(p, se.nr_wakeups_passive);
-                               goto out_set_cpu;
-                       }
+       idx = this_sd->wake_idx;
+
+       imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
+       load = source_load(cpu, idx);
+       this_load = target_load(this_cpu, idx);
+
+       new_cpu = this_cpu; /* Wake to this CPU if we can */
+
+       if (wake_affine(rq, this_sd, p, cpu, this_cpu, sync, idx,
+                                    load, this_load, imbalance))
+               goto out_set_cpu;
+
+       /*
+        * Start passive balancing when half the imbalance_pct
+        * limit is reached.
+        */
+       if (this_sd->flags & SD_WAKE_BALANCE) {
+               if (imbalance*this_load <= 100*load) {
+                       schedstat_inc(this_sd, ttwu_move_balance);
+                       schedstat_inc(p, se.nr_wakeups_passive);
+                       goto out_set_cpu;
                }
        }
 
+out_keep_cpu:
        new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
 out_set_cpu:
        return wake_idle(new_cpu, p);
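
Aside: the passive-balancing branch that remains inline in
select_task_rq_fair() keys off the same imbalance value, computed as
100 + (imbalance_pct - 100) / 2 so that balancing starts at half the
domain's imbalance_pct limit. A minimal stand-alone C sketch of that
threshold follows; imbalance_pct = 125 and the load figures are assumed
sample values, not taken from the commit:

    #include <stdio.h>

    int main(void)
    {
            unsigned int imbalance_pct = 125;  /* assumed sched-domain default */
            unsigned int imbalance = 100 + (imbalance_pct - 100) / 2;  /* = 112 */
            unsigned long this_load = 1024;    /* load on the waking CPU */
            unsigned long load = 2048;         /* load on the previous CPU */

            /*
             * Passive balancing fires once half the imbalance_pct limit
             * is reached: imbalance * this_load <= 100 * load.
             */
            if ((unsigned long)imbalance * this_load <= 100 * load)
                    printf("wake to this_cpu (passive balance)\n");
            else
                    printf("keep the task near its previous cpu\n");

            return 0;
    }

With these numbers the test is 112 * 1024 = 114688 <= 204800, which
holds, so the waking CPU wins; raising this_load past 1828 would tip
the decision the other way.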