p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
}
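+/*
+ * Keep a 1/8-weight exponentially-decaying average:
+ * avg += (sample - avg) / 8, via an arithmetic shift.
+ */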
+static void update_avg(u64 *avg, u64 sample)
+{
+ s64 diff = sample - *avg;
+ *avg += diff >> 3;
+}
+
static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
{
sched_info_queued(p);
static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
{
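+ /*
+ * A task going to sleep voluntarily: fold the time it kept running
+ * after its last wakeup into avg_overlap. A small avg_overlap marks
+ * a sync-wakeup pattern (the waker sleeps soon after waking the wakee).
+ */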
+ if (sleep && p->se.last_wakeup) {
+ update_avg(&p->se.avg_overlap,
+ p->se.sum_exec_runtime - p->se.last_wakeup);
+ p->se.last_wakeup = 0;
+ }
+
p->sched_class->dequeue_task(rq, p, sleep);
p->se.on_rq = 0;
}
p->sched_class->task_wake_up(rq, p);
#endif
out:
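+ /* stamp the waker; dequeue_task() uses this to compute the overlap */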
+ current->se.last_wakeup = current->se.sum_exec_runtime;
+
task_rq_unlock(rq, &flags);
return success;
__enqueue_entity(cfs_rq, se);
}
-static void update_avg(u64 *avg, u64 sample)
-{
- s64 diff = sample - *avg;
- *avg += diff >> 3;
-}
-
-static void update_avg_stats(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- if (!se->last_wakeup)
- return;
-
- update_avg(&se->avg_overlap, se->sum_exec_runtime - se->last_wakeup);
- se->last_wakeup = 0;
-}
-
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
update_stats_dequeue(cfs_rq, se);
if (sleep) {
- update_avg_stats(cfs_rq, se);
#ifdef CONFIG_SCHEDSTATS
if (entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
* a reasonable amount of time then attract this newly
* woken task:
*/
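+ /*
+ * avg_overlap is maintained in the core scheduler for every class
+ * now, so this check is no longer limited to fair-class tasks:
+ */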
- if (sync && balanced && curr->sched_class == &fair_sched_class) {
+ if (sync && balanced) {
if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
- p->se.avg_overlap < sysctl_sched_migration_cost)
+ p->se.avg_overlap < sysctl_sched_migration_cost)
return 1;
}
return;
}
- se->last_wakeup = se->sum_exec_runtime;
if (unlikely(se == pse))
return;