X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fsched_fair.c;h=01859f662ab7c834826f9197e93ebe9d44ad548a;hb=5c27d0f11e679e6e5b6b2047b8658409699d4a67;hp=a17b785d7000cd876045cae3e9c1fdba1f39ce9e;hpb=821f3eff7cdb9d6c7076effabd46c96c322daed1;p=linux-2.6

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a17b785d70..01859f662a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -876,6 +876,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 	}
 }
 
+#ifdef CONFIG_SMP
 /**************************************************
  * Fair scheduling class load-balancing methods:
  */
@@ -936,12 +937,11 @@ static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
 
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_nr_move, unsigned long max_load_move,
+		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
 		  int *all_pinned, int *this_best_prio)
 {
 	struct cfs_rq *busy_cfs_rq;
-	unsigned long load_moved, total_nr_moved = 0, nr_moved;
 	long rem_load_move = max_load_move;
 	struct rq_iterator cfs_rq_iterator;
 
@@ -969,25 +969,48 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 #else
 # define maxload rem_load_move
 #endif
-		/* pass busy_cfs_rq argument into
+		/*
+		 * pass busy_cfs_rq argument into
 		 * load_balance_[start|next]_fair iterators
 		 */
 		cfs_rq_iterator.arg = busy_cfs_rq;
-		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
-				max_nr_move, maxload, sd, idle, all_pinned,
-				&load_moved, this_best_prio, &cfs_rq_iterator);
-
-		total_nr_moved += nr_moved;
-		max_nr_move -= nr_moved;
-		rem_load_move -= load_moved;
+		rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
+					       maxload, sd, idle, all_pinned,
+					       this_best_prio,
+					       &cfs_rq_iterator);
 
-		if (max_nr_move <= 0 || rem_load_move <= 0)
+		if (rem_load_move <= 0)
 			break;
 	}
 
 	return max_load_move - rem_load_move;
 }
 
+static int
+move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle)
+{
+	struct cfs_rq *busy_cfs_rq;
+	struct rq_iterator cfs_rq_iterator;
+
+	cfs_rq_iterator.start = load_balance_start_fair;
+	cfs_rq_iterator.next = load_balance_next_fair;
+
+	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+		/*
+		 * pass busy_cfs_rq argument into
+		 * load_balance_[start|next]_fair iterators
+		 */
+		cfs_rq_iterator.arg = busy_cfs_rq;
+		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+				       &cfs_rq_iterator))
+			return 1;
+	}
+
+	return 0;
+}
+#endif
+
 /*
  * scheduler tick hitting a task of our scheduling class:
  */
@@ -1002,7 +1025,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
 	}
 }
 
-#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
+#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
 
 /*
  * Share the fairness runtime between parent and child, thus the
@@ -1031,12 +1054,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		swap(curr->vruntime, se->vruntime);
 	}
 
-	update_stats_enqueue(cfs_rq, se);
-	check_spread(cfs_rq, se);
-	check_spread(cfs_rq, curr);
-	__enqueue_entity(cfs_rq, se);
-	account_entity_enqueue(cfs_rq, se);
 	se->peer_preempt = 0;
+	enqueue_task_fair(rq, p, 0);
 	resched_task(rq->curr);
 }
 
@@ -1067,7 +1086,10 @@ static const struct sched_class fair_sched_class = {
 	.pick_next_task		= pick_next_task_fair,
 	.put_prev_task		= put_prev_task_fair,
 
+#ifdef CONFIG_SMP
 	.load_balance		= load_balance_fair,
+	.move_one_task		= move_one_task_fair,
+#endif
 
 	.set_curr_task          = set_curr_task_fair,
 	.task_tick		= task_tick_fair,
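
Note: the following is a minimal userspace sketch (not kernel code) of the
control-flow change in load_balance_fair() above. The patched function tracks
a single remaining-load budget and subtracts whatever balance_tasks() reports
as moved, instead of also juggling a separate max_nr_move task counter and a
load_moved out-parameter. Every name here (run_queue_sketch,
balance_tasks_sketch, the loads array) is a hypothetical stand-in, not the
kernel's API.

#include <stdio.h>

struct run_queue_sketch {		/* hypothetical stand-in for a busiest runqueue */
	unsigned long loads[4];		/* per-task load weights */
	int nr_tasks;
};

/* Move tasks until the load budget is spent; return the load actually moved. */
static unsigned long
balance_tasks_sketch(struct run_queue_sketch *busiest, unsigned long max_load_move)
{
	unsigned long moved = 0;

	while (busiest->nr_tasks > 0 && moved < max_load_move)
		moved += busiest->loads[--busiest->nr_tasks];

	return moved;
}

int main(void)
{
	struct run_queue_sketch busiest = { { 300, 200, 100, 400 }, 4 };
	long rem_load_move = 600;	/* signed, as in the patched function */

	/* The patched loop shape: subtract the helper's return value directly. */
	rem_load_move -= balance_tasks_sketch(&busiest, rem_load_move);
	if (rem_load_move <= 0)
		printf("budget spent, %d task(s) left on busiest\n",
		       busiest.nr_tasks);
	return 0;
}

Returning the moved load from the helper removes two pieces of loop-carried
state (total_nr_moved, max_nr_move) and leaves a single termination test,
rem_load_move <= 0.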
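A second hypothetical sketch, this time of the shape of the new
move_one_task_fair() path: walk each group's leaf runqueue and return as soon
as one task has been pulled, so the new .move_one_task hook can succeed
without scanning everything. Splitting this out of load_balance_fair(), and
wrapping both in #ifdef CONFIG_SMP, keeps the uniprocessor build free of
balancing code. All names below are illustrative stand-ins.

#include <stdbool.h>
#include <stdio.h>

struct group_rq_sketch {		/* hypothetical per-group runqueue */
	int nr_tasks;
};

/* Stand-in for iter_move_one_task(): pull one task if any is movable. */
static bool pull_one_sketch(struct group_rq_sketch *rq)
{
	if (rq->nr_tasks > 0) {
		rq->nr_tasks--;
		return true;
	}
	return false;
}

/* Shape of move_one_task_fair(): first successful pull wins, else fail. */
static int move_one_task_sketch(struct group_rq_sketch *groups, int ngroups)
{
	int i;

	for (i = 0; i < ngroups; i++)
		if (pull_one_sketch(&groups[i]))
			return 1;
	return 0;
}

int main(void)
{
	struct group_rq_sketch groups[3] = { { 0 }, { 2 }, { 1 } };

	/* The empty group 0 is skipped; the one task comes from group 1. */
	printf("moved: %d, tasks left in group 1: %d\n",
	       move_one_task_sketch(groups, 3), groups[1].nr_tasks);
	return 0;
}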