__update_rq_clock(rq);
}
-static u64 __rq_clock(struct rq *rq)
-{
- __update_rq_clock(rq);
-
- return rq->clock;
-}
-
-static u64 rq_clock(struct rq *rq)
-{
- update_rq_clock(rq);
- return rq->clock;
-}
-
/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 */
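The helpers removed above fused "update the clock" and "read the clock" into a
single call. After this change the two steps are explicit at every call site,
as the later hunks show; a minimal sketch of the new convention, assuming
rq->lock is held as the surrounding code requires:

	/* before (removed above): update + read in one call */
	now = __rq_clock(rq);

	/* after: the update and the read are separate, explicit steps */
	__update_rq_clock(rq);		/* advance rq->clock under rq->lock */
	now = rq->clock;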
/*
 * This function is called /before/ updating rq->ls.load
 * and when switching tasks.
 */
-static void update_curr_load(struct rq *rq, u64 now)
+static void update_curr_load(struct rq *rq)
{
struct load_stat *ls = &rq->ls;
u64 start;
start = ls->load_update_start;
- ls->load_update_start = now;
- ls->delta_stat += now - start;
+ ls->load_update_start = rq->clock;
+ ls->delta_stat += rq->clock - start;
/*
* Stagger updates to ls->delta_fair. Very frequent updates
* can be expensive.
__update_curr_load(rq, ls);
}
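With the parameter gone, update_curr_load() reads rq->clock directly, which
makes the caller responsible for refreshing the clock first. A sketch of the
resulting contract (the caller shown is illustrative, not a specific site):

	__update_rq_clock(rq);	/* otherwise delta_stat absorbs a stale window */
	update_curr_load(rq);	/* accumulates rq->clock - load_update_start */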
-static inline void
-inc_load(struct rq *rq, const struct task_struct *p, u64 now)
+static inline void inc_load(struct rq *rq, const struct task_struct *p)
{
- update_curr_load(rq, now);
+ update_curr_load(rq);
update_load_add(&rq->ls.load, p->se.load.weight);
}
-static inline void
-dec_load(struct rq *rq, const struct task_struct *p, u64 now)
+static inline void dec_load(struct rq *rq, const struct task_struct *p)
{
- update_curr_load(rq, now);
+ update_curr_load(rq);
update_load_sub(&rq->ls.load, p->se.load.weight);
}
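inc_load()/dec_load() first fold the pending clock delta into the load
statistics, then adjust the aggregate weight, so the averages never charge
time against a weight that was not queued for it. The update_load_* helpers
themselves are trivial; a minimal sketch, assuming the era's struct
load_weight:

	static inline void update_load_add(struct load_weight *lw, unsigned long inc)
	{
		lw->weight += inc;
	}

	static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
	{
		lw->weight -= dec;
	}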
-static void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
+static void inc_nr_running(struct task_struct *p, struct rq *rq)
{
rq->nr_running++;
- inc_load(rq, p, now);
+ inc_load(rq, p);
}
-static void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
+static void dec_nr_running(struct task_struct *p, struct rq *rq)
{
rq->nr_running--;
- dec_load(rq, p, now);
+ dec_load(rq, p);
}
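inc_nr_running()/dec_nr_running() keep the task count and the weight aggregate
in lock-step; every activate/deactivate passes through them. A condensed
sketch of the activate path, per the hunks above (locking elided):

	__update_rq_clock(rq);
	now = rq->clock;		/* the wrapper still carries 'now' at this stage */
	enqueue_task(rq, p, wakeup, now);
	inc_nr_running(p, rq);		/* count and weight move together */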
@@ ... @@ static void set_load_weight(struct task_struct *p)
enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
{
sched_info_queued(p);
- p->sched_class->enqueue_task(rq, p, wakeup, now);
+ p->sched_class->enqueue_task(rq, p, wakeup);
p->se.on_rq = 1;
}
static void
dequeue_task(struct rq *rq, struct task_struct *p, int sleep, u64 now)
{
- p->sched_class->dequeue_task(rq, p, sleep, now);
+ p->sched_class->dequeue_task(rq, p, sleep);
p->se.on_rq = 0;
}
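The same sweep trims the sched_class hooks. Judged from the call sites in this
section, the method table of this era would look roughly as follows (abridged
and reconstructed from the calls, not quoted):

	struct sched_class {
		struct sched_class *next;

		void (*enqueue_task)(struct rq *rq, struct task_struct *p, int wakeup);
		void (*dequeue_task)(struct rq *rq, struct task_struct *p, int sleep);

		struct task_struct *(*pick_next_task)(struct rq *rq);
		void (*put_prev_task)(struct rq *rq, struct task_struct *p);
		void (*task_new)(struct rq *rq, struct task_struct *p);
	};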
rq->nr_uninterruptible--;
enqueue_task(rq, p, wakeup, now);
- inc_nr_running(p, rq, now);
+ inc_nr_running(p, rq);
}
/*
rq->nr_uninterruptible--;
enqueue_task(rq, p, 0, now);
- inc_nr_running(p, rq, now);
+ inc_nr_running(p, rq);
}
/*
* Let the scheduling class do new task startup
* management (if any):
*/
- p->sched_class->task_new(rq, p, now);
- inc_nr_running(p, rq, now);
+ p->sched_class->task_new(rq, p);
+ inc_nr_running(p, rq);
}
check_preempt_curr(rq, p);
task_rq_unlock(rq, &flags);
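For the new-task path the ordering matters: the clock is refreshed once after
taking the runqueue lock, and the class hook, the accounting, and the
preemption check all run against that single timestamp. A heavily condensed
sketch of wake_up_new_task(), with the fallback activate_task() branch and
locking details elided:

	rq = task_rq_lock(p, &flags);
	update_rq_clock(rq);			/* one refresh for the whole path */
	p->sched_class->task_new(rq, p);	/* class-specific initial placement */
	inc_nr_running(p, rq);
	check_preempt_curr(rq, p);		/* may mark rq->curr for resched */
	task_rq_unlock(rq, &flags);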
unsigned long total_load = this_rq->ls.load.weight;
unsigned long this_load = total_load;
struct load_stat *ls = &this_rq->ls;
- u64 now = __rq_clock(this_rq);
+ u64 now;
int i, scale;
+ __update_rq_clock(this_rq);
+ now = this_rq->clock;
+
this_rq->nr_load_updates++;
if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
goto do_avg;
/* Update delta_fair/delta_exec fields first */
- update_curr_load(this_rq, now);
+ update_curr_load(this_rq);
fair_delta64 = ls->delta_fair + 1;
ls->delta_fair = 0;
exec_delta64 = ls->delta_exec + 1;
ls->delta_exec = 0;
- sample_interval64 = now - ls->load_update_last;
- ls->load_update_last = now;
+ sample_interval64 = this_rq->clock - ls->load_update_last;
+ ls->load_update_last = this_rq->clock;
if ((s64)sample_interval64 < (s64)TICK_NSEC)
sample_interval64 = TICK_NSEC;
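The clamp above guards the fixed-point averaging: the deltas collected earlier
are divided by sample_interval64 further down, so an interval shorter than one
tick would inflate the result. An illustrative (not verbatim) sketch of the
scaling step:

	/* illustrative only; the real formula differs in detail, and the
	 * kernel would use do_div()-style helpers rather than a plain '/' */
	u64 tmp64 = (SCHED_LOAD_SCALE * fair_delta64) / sample_interval64;
	this_load = (unsigned long)tmp64;	/* divisor never below TICK_NSEC */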
* Pick up the highest-prio task:
*/
static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev, u64 now)
+pick_next_task(struct rq *rq, struct task_struct *prev)
{
struct sched_class *class;
struct task_struct *p;
* the fair class we can call that function directly:
*/
if (likely(rq->nr_running == rq->cfs.nr_running)) {
- p = fair_sched_class.pick_next_task(rq, now);
+ p = fair_sched_class.pick_next_task(rq);
if (likely(p))
return p;
}
class = sched_class_highest;
for ( ; ; ) {
- p = class->pick_next_task(rq, now);
+ p = class->pick_next_task(rq);
if (p)
return p;
/*
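The fast path above covers the common case in which every runnable task is in
the fair class; otherwise the classes are walked from highest priority down.
A sketch of the walk, assuming the era's rt -> fair -> idle linkage via
class->next (the comment truncated above notes the idle class always returns
a task, so the loop terminates):

	for (class = sched_class_highest; ; class = class->next) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}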
spin_lock_irq(&rq->lock);
clear_tsk_need_resched(prev);
- now = __rq_clock(rq);
+ __update_rq_clock(rq);
+ now = rq->clock;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
if (unlikely(!rq->nr_running))
idle_balance(cpu, rq);
- prev->sched_class->put_prev_task(rq, prev, now);
- next = pick_next_task(rq, prev, now);
+ prev->sched_class->put_prev_task(rq, prev);
+ next = pick_next_task(rq, prev);
sched_info_switch(prev, next);
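Seen end to end, the point of the change shows up here: schedule() refreshes
rq->clock exactly once per invocation, and everything downstream
(put_prev_task(), pick_next_task(), and the class methods they call) reuses
that single timestamp. A condensed view of the hunk above:

	spin_lock_irq(&rq->lock);
	clear_tsk_need_resched(prev);
	__update_rq_clock(rq);		/* the one clock refresh per schedule() */
	now = rq->clock;
	/* ... deactivate prev, idle_balance() if the queue is empty ... */
	prev->sched_class->put_prev_task(rq, prev);
	next = pick_next_task(rq, prev);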
on_rq = p->se.on_rq;
if (on_rq) {
dequeue_task(rq, p, 0, now);
- dec_load(rq, p, now);
+ dec_load(rq, p);
}
p->static_prio = NICE_TO_PRIO(nice);
if (on_rq) {
enqueue_task(rq, p, 0, now);
- inc_load(rq, p, now);
+ inc_load(rq, p);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
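Renicing follows the usual dequeue/modify/enqueue pattern, with the load
aggregate adjusted by hand on both sides. A condensed sketch of
set_user_nice(), with priority recomputation and locking elided
(set_load_weight() refreshes p->se.load from the new static_prio):

	on_rq = p->se.on_rq;
	if (on_rq) {
		dequeue_task(rq, p, 0, now);	/* wrapper still carries 'now' */
		dec_load(rq, p);		/* old weight leaves the aggregate */
	}
	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p);			/* recompute p->se.load */
	if (on_rq) {
		enqueue_task(rq, p, 0, now);
		inc_load(rq, p);		/* new weight enters the aggregate */
	}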
if (!rq->nr_running)
break;
update_rq_clock(rq);
- next = pick_next_task(rq, rq->curr, rq->clock);
+ next = pick_next_task(rq, rq->curr);
if (!next)
break;
migrate_dead(dead_cpu, next);
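The CPU-hotplug path follows the same discipline: refresh the clock, then
pick. A sketch of the enclosing migrate_dead_tasks() loop, with the function's
frame assumed from context:

	for ( ; ; ) {
		if (!rq->nr_running)
			break;
		update_rq_clock(rq);	/* pick_next_task() reads rq->clock */
		next = pick_next_task(rq, rq->curr);
		if (!next)
			break;
		migrate_dead(dead_cpu, next);
	}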