err.no Git - linux-2.6/commitdiff
sched: remove stat_gran
author    Ingo Molnar <mingo@elte.hu>
Mon, 15 Oct 2007 15:00:03 +0000 (17:00 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 15 Oct 2007 15:00:03 +0000 (17:00 +0200)
remove the stat_gran code - it was disabled by default and caused
unnecessary overhead.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/sched.h
kernel/sched.c
kernel/sched_fair.c
kernel/sysctl.c

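A minimal user-space sketch of the batching this patch removes (hypothetical
and simplified stand-ins for the kernel's types and helpers, not the actual
scheduler code): the old update_curr() accumulated execution-time deltas in
se->delta_exec and only folded them into the statistics once they exceeded
sysctl_sched_stat_granularity. With the default granularity of 0 that path
degenerates into the direct accounting that remains after the patch, which is
why the extra fields, branches and sysctl were pure overhead:

/* sketch only -- not kernel code */
#include <stdio.h>

struct se_sketch {
	unsigned long long sum_exec_runtime;	/* total accounted runtime */
	unsigned long delta_exec;		/* batched delta (removed) */
	unsigned long long exec_start;		/* clock at last update */
};

/* stand-in for the removed sysctl; it defaulted to 0 (disabled) */
static unsigned int sysctl_sched_stat_granularity;

/* old behaviour: batch deltas, flush once they exceed the granularity */
static void update_curr_batched(struct se_sketch *se, unsigned long long now)
{
	se->delta_exec += (unsigned long)(now - se->exec_start);
	if (se->delta_exec > sysctl_sched_stat_granularity) {
		se->sum_exec_runtime += se->delta_exec;
		se->delta_exec = 0;
	}
	se->exec_start = now;
}

/* new behaviour: account the delta on every call, no per-entity batching */
static void update_curr_direct(struct se_sketch *se, unsigned long long now)
{
	se->sum_exec_runtime += (unsigned long)(now - se->exec_start);
	se->exec_start = now;
}

int main(void)
{
	struct se_sketch before = { 0, 0, 100 }, after = { 0, 0, 100 };

	update_curr_batched(&before, 150);	/* 50 time units elapsed */
	update_curr_direct(&after, 150);

	/* with granularity 0 both paths account the same 50 units */
	printf("batched=%llu direct=%llu\n",
	       before.sum_exec_runtime, after.sum_exec_runtime);
	return 0;
}
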
diff --git a/include/linux/sched.h b/include/linux/sched.h
index befca3f9364af291d2f126b0b3e334a1970a0af1..3c38a5040e8f30f7c028a836f9d0e4b9d5c0a8dc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -895,9 +895,6 @@ struct load_weight {
  */
 struct sched_entity {
        long                    wait_runtime;
-       unsigned long           delta_fair_run;
-       unsigned long           delta_fair_sleep;
-       unsigned long           delta_exec;
        s64                     fair_key;
        struct load_weight      load;           /* for load-balancing */
        struct rb_node          run_node;
diff --git a/kernel/sched.c b/kernel/sched.c
index ae1544f0a20dc6f7b619d12a99f44c47578a312d..d4dabfcc776c7003a9e76a024a1c7908f05aac31 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -829,7 +829,7 @@ static void update_curr_load(struct rq *rq)
         * Stagger updates to ls->delta_fair. Very frequent updates
         * can be expensive.
         */
-       if (ls->delta_stat >= sysctl_sched_stat_granularity)
+       if (ls->delta_stat)
                __update_curr_load(rq, ls);
 }
 
@@ -1588,9 +1588,6 @@ static void __sched_fork(struct task_struct *p)
        p->se.exec_start                = 0;
        p->se.sum_exec_runtime          = 0;
        p->se.prev_sum_exec_runtime     = 0;
-       p->se.delta_exec                = 0;
-       p->se.delta_fair_run            = 0;
-       p->se.delta_fair_sleep          = 0;
        p->se.wait_runtime              = 0;
        p->se.sleep_start_fair          = 0;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2e84aaffe425e16de26a1792a6be48355b87a985..2138c40f4836e76c9d949e482fba13e783002bbe 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -85,8 +85,6 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
  */
 const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 
-const_debug unsigned int sysctl_sched_stat_granularity;
-
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
 /*
@@ -360,13 +358,13 @@ add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
  * are not in our scheduling class.
  */
 static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+             unsigned long delta_exec)
 {
-       unsigned long delta, delta_exec, delta_fair, delta_mine;
+       unsigned long delta, delta_fair, delta_mine;
        struct load_weight *lw = &cfs_rq->load;
        unsigned long load = lw->weight;
 
-       delta_exec = curr->delta_exec;
        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 
        curr->sum_exec_runtime += delta_exec;
@@ -400,6 +398,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 static void update_curr(struct cfs_rq *cfs_rq)
 {
        struct sched_entity *curr = cfs_rq_curr(cfs_rq);
+       u64 now = rq_of(cfs_rq)->clock;
        unsigned long delta_exec;
 
        if (unlikely(!curr))
@@ -410,15 +409,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
-       delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
-
-       curr->delta_exec += delta_exec;
+       delta_exec = (unsigned long)(now - curr->exec_start);
 
-       if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
-               __update_curr(cfs_rq, curr);
-               curr->delta_exec = 0;
-       }
-       curr->exec_start = rq_of(cfs_rq)->clock;
+       __update_curr(cfs_rq, curr, delta_exec);
+       curr->exec_start = now;
 }
 
 static inline void
@@ -494,10 +488,9 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Note: must be called with a freshly updated rq->fair_clock.
  */
 static inline void
-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
+                       unsigned long delta_fair)
 {
-       unsigned long delta_fair = se->delta_fair_run;
-
        schedstat_set(se->wait_max, max(se->wait_max,
                        rq_of(cfs_rq)->clock - se->wait_start));
 
@@ -519,12 +512,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
        delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
                        (u64)(cfs_rq->fair_clock - se->wait_start_fair));
 
-       se->delta_fair_run += delta_fair;
-       if (unlikely(abs(se->delta_fair_run) >=
-                               sysctl_sched_stat_granularity)) {
-               __update_stats_wait_end(cfs_rq, se);
-               se->delta_fair_run = 0;
-       }
+       __update_stats_wait_end(cfs_rq, se, delta_fair);
 
        se->wait_start_fair = 0;
        schedstat_set(se->wait_start, 0);
@@ -567,9 +555,10 @@ update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Scheduling class queueing methods:
  */
 
-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
+                             unsigned long delta_fair)
 {
-       unsigned long load = cfs_rq->load.weight, delta_fair;
+       unsigned long load = cfs_rq->load.weight;
        long prev_runtime;
 
        /*
@@ -582,8 +571,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
        if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
                load = rq_of(cfs_rq)->cpu_load[2];
 
-       delta_fair = se->delta_fair_sleep;
-
        /*
         * Fix up delta_fair with the effect of us running
         * during the whole sleep period:
@@ -618,12 +605,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
        delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
                (u64)(cfs_rq->fair_clock - se->sleep_start_fair));
 
-       se->delta_fair_sleep += delta_fair;
-       if (unlikely(abs(se->delta_fair_sleep) >=
-                               sysctl_sched_stat_granularity)) {
-               __enqueue_sleeper(cfs_rq, se);
-               se->delta_fair_sleep = 0;
-       }
+       __enqueue_sleeper(cfs_rq, se, delta_fair);
 
        se->sleep_start_fair = 0;
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6c97259e863e6bc96545bc873a132e05de29105c..9b1b0d4ff9662e485ec35d87bcc29c2c03ddf1fe 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -264,17 +264,6 @@ static ctl_table kern_table[] = {
                .extra1         = &min_wakeup_granularity_ns,
                .extra2         = &max_wakeup_granularity_ns,
        },
-       {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "sched_stat_granularity_ns",
-               .data           = &sysctl_sched_stat_granularity,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &min_wakeup_granularity_ns,
-               .extra2         = &max_wakeup_granularity_ns,
-       },
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "sched_runtime_limit_ns",
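
Since the entry removed above lived in kern_table, the corresponding
/proc/sys/kernel/sched_stat_granularity_ns file disappears once this patch is
applied. A small, purely illustrative user-space check (hypothetical file
name, not part of the patch):

/* sysctl_check.c -- reports whether the removed sysctl is still visible */
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/sched_stat_granularity_ns";
	FILE *f = fopen(path, "r");

	if (!f) {
		/* expected on kernels carrying this patch */
		printf("%s: %s\n", path, strerror(errno));
		return 0;
	}
	printf("%s still exists (pre-patch kernel)\n", path);
	fclose(f);
	return 0;
}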