diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c495dcf703..da7c061e72 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,7 +22,7 @@
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
- * (default: 20ms, units: nanoseconds)
+ * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * NOTE: this latency value is not the same as the concept of
  * 'timeslice length' - timeslices in CFS are of variable length
@@ -32,18 +32,18 @@
  * (to see the precise effective timeslice length of your workload,
  * run vmstat and monitor the context-switches (cs) field)
  */
-const_debug unsigned int sysctl_sched_latency = 20000000ULL;
+unsigned int sysctl_sched_latency = 20000000ULL;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
- * (default: 1 msec, units: nanoseconds)
+ * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-const_debug unsigned int sysctl_sched_min_granularity = 1000000ULL;
+unsigned int sysctl_sched_min_granularity = 4000000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-const_debug unsigned int sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 5;
 
 /*
  * After fork, child runs first. (default) If set to 0 then
@@ -61,23 +61,23 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
  * SCHED_BATCH wake-up granularity.
- * (default: 10 msec, units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
+unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 10 msec, units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-const_debug unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
@@ -351,6 +351,12 @@ static void update_curr(struct cfs_rq *cfs_rq)
 
 	__update_curr(cfs_rq, curr, delta_exec);
 	curr->exec_start = now;
+
+	if (entity_is_task(curr)) {
+		struct task_struct *curtask = task_of(curr);
+
+		cpuacct_charge(curtask, delta_exec);
+	}
 }
 
 static inline void
@@ -505,8 +511,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
-				task_of(se)->policy != SCHED_BATCH)
+		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
 			vruntime -= sysctl_sched_latency;
 
 		/* ensure we never gain time by being placed backwards. */
@@ -546,7 +551,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
-		se->peer_preempt = 0;
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
@@ -574,10 +578,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime ||
-			(sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
+	if (delta_exec > ideal_runtime)
 		resched_task(rq_of(cfs_rq)->curr);
-	curr->peer_preempt = 0;
 }
 
 static void
@@ -796,8 +798,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
  */
 static void yield_task_fair(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
-	struct sched_entity *rightmost, *se = &rq->curr->se;
+	struct task_struct *curr = rq->curr;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	struct sched_entity *rightmost, *se = &curr->se;
 
 	/*
 	 * Are we the only task in the tree?
@@ -805,7 +808,7 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
-	if (likely(!sysctl_sched_compat_yield)) {
+	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		__update_rq_clock(rq);
 		/*
 		 * Update run-time statistics of the 'current'.
@@ -840,7 +843,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
-	s64 delta, gran;
+	unsigned long gran;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -855,24 +858,20 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	if (unlikely(p->policy == SCHED_BATCH))
 		return;
 
-	if (sched_feat(WAKEUP_PREEMPT)) {
-		while (!is_same_group(se, pse)) {
-			se = parent_entity(se);
-			pse = parent_entity(pse);
-		}
+	if (!sched_feat(WAKEUP_PREEMPT))
+		return;
 
-		delta = se->vruntime - pse->vruntime;
-		gran = sysctl_sched_wakeup_granularity;
-		if (unlikely(se->load.weight != NICE_0_LOAD))
-			gran = calc_delta_fair(gran, &se->load);
+	while (!is_same_group(se, pse)) {
+		se = parent_entity(se);
+		pse = parent_entity(pse);
+	}
 
-		if (delta > gran) {
-			int now = !sched_feat(PREEMPT_RESTRICT);
+	gran = sysctl_sched_wakeup_granularity;
+	if (unlikely(se->load.weight != NICE_0_LOAD))
+		gran = calc_delta_fair(gran, &se->load);
 
-			if (now || p->prio < curr->prio || !se->peer_preempt++)
-				resched_task(curr);
-		}
-	}
+	if (pse->vruntime + gran < se->vruntime)
+		resched_task(curr);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1074,8 +1073,9 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
 	update_curr(cfs_rq);
 	place_entity(cfs_rq, se, 1);
 
+	/* 'curr' will be NULL if the child belongs to a different group */
 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr->vruntime < se->vruntime) {
+			curr && curr->vruntime < se->vruntime) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.
@@ -1083,7 +1083,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		swap(curr->vruntime, se->vruntime);
 	}
 
-	se->peer_preempt = 0;
 	enqueue_task_fair(rq, p, 0);
 	resched_task(rq->curr);
 }
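A note on the retuned tunables above: the new "* (1 + ilog(ncpus))" wording in the comments reflects that each base value is scaled once at boot by the CPU count, and sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity, which is why its default drops from 20 to 5 (20 msec / 4 msec = 5) and stays invariant under the scaling. A minimal user-space sketch of that arithmetic (ilog here means integer log2; the helper and main() are illustrative, not kernel code):

#include <stdio.h>

/* integer log2: position of the highest set bit */
static unsigned int ilog(unsigned int n)
{
	unsigned int r = 0;

	while (n >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int ncpus = 8;			/* example CPU count */
	unsigned int factor = 1 + ilog(ncpus);	/* 1 + ilog(8) = 4 */

	unsigned long long latency = 20000000ULL * factor;
	unsigned long long min_gran = 4000000ULL * factor;

	/* the ratio is unaffected by the scaling: 20 msec / 4 msec = 5 */
	printf("latency=%lluns min_granularity=%lluns nr_latency=%llu\n",
	       latency, min_gran, latency / min_gran);
	return 0;
}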
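The restructured tail of check_preempt_wakeup() is easier to read in isolation: the early return replaces one level of nesting, the peer_preempt/PREEMPT_RESTRICT throttling is gone, and the signed "delta > gran" comparison becomes "pse->vruntime + gran < se->vruntime", i.e. the wakee's vruntime must be more than one (load-scaled) wakeup granularity smaller than the current task's before a reschedule is requested. A self-contained model of the new test, assuming calc_delta_fair() amounts to a NICE_0_LOAD/weight rescale (the kernel computes this with fixed-point load weights; the names below are illustrative):

#include <stdio.h>

#define NICE_0_LOAD 1024UL

struct entity {
	unsigned long long vruntime;	/* CFS virtual runtime, in ns */
	unsigned long weight;		/* load weight of the entity */
};

/* illustrative stand-in for calc_delta_fair(): gran * NICE_0_LOAD / weight */
static unsigned long scale_gran(unsigned long gran, unsigned long weight)
{
	return (unsigned long)((unsigned long long)gran * NICE_0_LOAD / weight);
}

/* the new condition: resched only if the wakee's vruntime, padded by the
 * granularity, is still smaller than the current task's vruntime */
static int should_preempt(const struct entity *se, const struct entity *pse,
			  unsigned long wakeup_gran)
{
	unsigned long gran = wakeup_gran;

	if (se->weight != NICE_0_LOAD)
		gran = scale_gran(gran, se->weight);

	return pse->vruntime + gran < se->vruntime;
}

int main(void)
{
	struct entity curr = { 50000000ULL, NICE_0_LOAD };
	struct entity wakee = { 35000000ULL, NICE_0_LOAD };

	/* wakee is 15 msec of vruntime behind (owed CPU), gran is 10 msec:
	 * 35ms + 10ms < 50ms, so the current task gets rescheduled */
	printf("preempt: %d\n", should_preempt(&curr, &wakee, 10000000UL));
	return 0;
}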
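The place_entity() hunk, similarly, stops special-casing SCHED_BATCH in the sleeper-placement path; batch tasks are handled at the wakeup and yield sites instead, as the check_preempt_wakeup() and yield_task_fair() hunks show. What remains of the placement logic, sketched standalone under the assumption that min_vruntime and the NEW_FAIR_SLEEPERS feature bit behave as in CFS (plain types, an illustrative function name, and a max_vruntime() that ignores the u64 wraparound handling the kernel's version has):

#include <stdio.h>

typedef unsigned long long u64;

/* unlike the kernel's max_vruntime(), this ignores u64 wraparound */
static u64 max_vruntime(u64 a, u64 b)
{
	return a > b ? a : b;
}

/* a freshly woken (non-initial) entity is credited with up to one full
 * scheduling latency, but never moves left of where it already stands */
static u64 place_sleeper(u64 min_vruntime, u64 se_vruntime,
			 u64 sched_latency, int new_fair_sleepers)
{
	u64 vruntime = min_vruntime;

	if (new_fair_sleepers)
		vruntime -= sched_latency;

	/* ensure we never gain time by being placed backwards */
	return max_vruntime(se_vruntime, vruntime);
}

int main(void)
{
	/* a long sleep is capped: credit is one latency, not the whole nap */
	printf("%llu\n", place_sleeper(100000000ULL, 30000000ULL,
				       20000000ULL, 1));	/* 80000000 */
	return 0;
}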