 */
const_debug unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
+const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+
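
The new sysctl_sched_migration_cost knob is the cache-hotness cutoff used by
the load balancer: a task that last ran more recently than this (500us by
default) is assumed to still have a warm cache and is left alone. A minimal
sketch of the consumer, simplified from the task_hot() helper in sched.c
(the real helper's signature differs; this is illustration, not part of the
patch):

	static int task_hot(struct task_struct *p, u64 now)
	{
		/* nanoseconds since the task last began executing */
		s64 delta = now - p->se.exec_start;

		/* still cache-hot if it ran within the migration-cost window */
		return delta < (s64)sysctl_sched_migration_cost;
	}
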
/**************************************************************
 * CFS operations on generic schedulable entities:
 */
	se->exec_start = rq_of(cfs_rq)->clock;
}
-/*
- * We are descheduling a task - update its stats:
- */
-static inline void
-update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	se->exec_start = 0;
-}
-
/**************************************************
 * Scheduling class queueing methods:
 */
	if (prev->on_rq)
		update_curr(cfs_rq);
-	update_stats_curr_end(cfs_rq, prev);
-
	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		resched_task(curr);
		return;
	}
+	/*
+	 * Batch tasks do not preempt (their preemption is driven by
+	 * the tick):
+	 */
+	if (unlikely(p->policy == SCHED_BATCH))
+		return;
+
	if (sched_feat(WAKEUP_PREEMPT)) {
		while (!is_same_group(se, pse)) {
		}
	}
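
The while loop above is shown with its body elided: in the group-scheduling
case it walks both the current and the waking entity up the task-group
hierarchy until they share a cfs_rq, at which point their vruntimes are
directly comparable. A sketch of that walk, assuming the usual
parent_entity() helper (only meaningful under CONFIG_FAIR_GROUP_SCHED):

	while (!is_same_group(se, pse)) {
		se = parent_entity(se);		/* step up one group level */
		pse = parent_entity(pse);
	}
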
+#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_nr_move, unsigned long max_load_move,
+		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	struct cfs_rq *busy_cfs_rq;
-	unsigned long load_moved, total_nr_moved = 0, nr_moved;
	long rem_load_move = max_load_move;
	struct rq_iterator cfs_rq_iterator;
#else
# define maxload rem_load_move
#endif
-		/* pass busy_cfs_rq argument into
+		/*
+		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
-		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
-				max_nr_move, maxload, sd, idle, all_pinned,
-				&load_moved, this_best_prio, &cfs_rq_iterator);
+		rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
+					       maxload, sd, idle, all_pinned,
+					       this_best_prio,
+					       &cfs_rq_iterator);
-		total_nr_moved += nr_moved;
-		max_nr_move -= nr_moved;
-		rem_load_move -= load_moved;
-
-		if (max_nr_move <= 0 || rem_load_move <= 0)
+		if (rem_load_move <= 0)
			break;
	}

-	return total_nr_moved;
+	return max_load_move - rem_load_move;
}
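
With the nr_move bookkeeping gone, load_balance_fair() reports progress
purely as weighted load: the difference between what it was asked to move
and what is still outstanding. The caller-side contract this implies might
look like the following fragment of move_tasks() in sched.c (an assumption
about the matching generic code, which is not part of this excerpt):

	total_load_moved +=
		class->load_balance(this_rq, this_cpu, busiest,
				    max_load_move - total_load_moved,
				    sd, idle, all_pinned, &this_best_prio);
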
+static int
+move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle)
+{
+	struct cfs_rq *busy_cfs_rq;
+	struct rq_iterator cfs_rq_iterator;
+
+	cfs_rq_iterator.start = load_balance_start_fair;
+	cfs_rq_iterator.next = load_balance_next_fair;
+
+	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+		/*
+		 * pass busy_cfs_rq argument into
+		 * load_balance_[start|next]_fair iterators
+		 */
+		cfs_rq_iterator.arg = busy_cfs_rq;
+		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+				       &cfs_rq_iterator))
+			return 1;
+	}
+
+	return 0;
+}
+#endif
+
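
move_one_task_fair() backs the new ->move_one_task() class hook: instead of
moving a given amount of weighted load, it pulls the first migratable task
the iterators yield, which is what active balancing needs. A sketch of the
generic caller, assuming it simply tries each scheduling class in turn:

	static int move_one_task(struct rq *this_rq, int this_cpu,
				 struct rq *busiest, struct sched_domain *sd,
				 enum cpu_idle_type idle)
	{
		const struct sched_class *class;

		for (class = sched_class_highest; class; class = class->next)
			if (class->move_one_task(this_rq, this_cpu, busiest,
						 sd, idle))
				return 1;

		return 0;
	}
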
/*
 * scheduler tick hitting a task of our scheduling class:
 */
	}
}
-#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
+#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
/*
 * Share the fairness runtime between parent and child, thus the
		swap(curr->vruntime, se->vruntime);
	}
-	update_stats_enqueue(cfs_rq, se);
-	check_spread(cfs_rq, se);
-	check_spread(cfs_rq, curr);
-	__enqueue_entity(cfs_rq, se);
-	account_entity_enqueue(cfs_rq, se);
	se->peer_preempt = 0;
+	enqueue_task_fair(rq, p, 0);
	resched_task(rq->curr);
}
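
The open-coded enqueue steps in task_new_fair() are replaced by a single
call to enqueue_task_fair(), so a freshly forked child now takes exactly
the same enqueue path as every other task. For reference, an abridged
sketch of that shared path (assumed shape for this era of the code):

	static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
	{
		struct sched_entity *se = &p->se;

		for_each_sched_entity(se) {
			if (se->on_rq)
				break;
			enqueue_entity(cfs_rq_of(se), se, wakeup);
			wakeup = 1;	/* parent groups are enqueued as wakeups */
		}
	}
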
	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,
+#ifdef CONFIG_SMP
	.load_balance		= load_balance_fair,
+	.move_one_task		= move_one_task_fair,
+#endif
	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,