From: Ingo Molnar
Date: Mon, 9 Jul 2007 16:51:58 +0000 (+0200)
Subject: sched: add rq_clock()/__rq_clock()
X-Git-Tag: v2.6.23-rc1~1262
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=20d315d42aed95423a7203e1d7e84086004b5a00;p=linux-2.6

sched: add rq_clock()/__rq_clock()

add rq_clock()/__rq_clock(), a robust wrapper around sched_clock(),
used by CFS. It protects against the common types of sched_clock()
problems caused by hardware: time warping forwards and backwards.

Signed-off-by: Ingo Molnar
---

diff --git a/kernel/sched.c b/kernel/sched.c
index 085418bedc..29eb227e33 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -388,6 +388,52 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+/*
+ * Per-runqueue clock, as finegrained as the platform can give us:
+ */
+static unsigned long long __rq_clock(struct rq *rq)
+{
+	u64 prev_raw = rq->prev_clock_raw;
+	u64 now = sched_clock();
+	s64 delta = now - prev_raw;
+	u64 clock = rq->clock;
+
+	/*
+	 * Protect against sched_clock() occasionally going backwards:
+	 */
+	if (unlikely(delta < 0)) {
+		clock++;
+		rq->clock_warps++;
+	} else {
+		/*
+		 * Catch too large forward jumps too:
+		 */
+		if (unlikely(delta > 2*TICK_NSEC)) {
+			clock++;
+			rq->clock_overflows++;
+		} else {
+			if (unlikely(delta > rq->clock_max_delta))
+				rq->clock_max_delta = delta;
+			clock += delta;
+		}
+	}
+
+	rq->prev_clock_raw = now;
+	rq->clock = clock;
+
+	return clock;
+}
+
+static inline unsigned long long rq_clock(struct rq *rq)
+{
+	int this_cpu = smp_processor_id();
+
+	if (this_cpu == cpu_of(rq))
+		return __rq_clock(rq);
+
+	return rq->clock;
+}
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
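
[Editor's note: the sketch below is a minimal user-space rendition of the
patch's warp-filtering idea, shown in isolation so it can be compiled and
played with. struct clk, clk_update() and the TICK_NSEC stand-in value are
hypothetical names chosen for the example; in the kernel, sched_clock()
supplies the raw samples and TICK_NSEC comes from the tick configuration.]

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL	/* stand-in value: a 1ms tick */

struct clk {
	uint64_t prev_raw;	/* last raw sample seen */
	uint64_t clock;		/* filtered, monotonic clock */
	uint64_t warps;		/* raw clock went backwards */
	uint64_t overflows;	/* raw clock jumped too far forward */
	uint64_t max_delta;	/* largest forward step accepted */
};

/* Feed one raw sample, return the filtered clock value: */
static uint64_t clk_update(struct clk *c, uint64_t raw)
{
	int64_t delta = (int64_t)(raw - c->prev_raw);

	if (delta < 0) {
		/* backward warp: advance minimally, count it */
		c->clock++;
		c->warps++;
	} else if ((uint64_t)delta > 2*TICK_NSEC) {
		/* implausibly large forward jump: advance minimally, count it */
		c->clock++;
		c->overflows++;
	} else {
		if ((uint64_t)delta > c->max_delta)
			c->max_delta = delta;
		c->clock += delta;
	}
	c->prev_raw = raw;

	return c->clock;
}

int main(void)
{
	struct clk c = { 0 };
	/* normal step, normal step, backward warp, huge forward jump: */
	uint64_t samples[] = { 1000, 2000, 1500, 900000000ULL };
	size_t i;

	for (i = 0; i < sizeof(samples)/sizeof(samples[0]); i++)
		printf("raw=%llu clock=%llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)clk_update(&c, samples[i]));

	printf("warps=%llu overflows=%llu max_delta=%llu\n",
	       (unsigned long long)c.warps,
	       (unsigned long long)c.overflows,
	       (unsigned long long)c.max_delta);
	return 0;
}

Note the design choice mirrored from the patch: a rejected sample still
advances the clock by 1, so the filtered clock stays strictly monotonic
across a warp, and the warp/overflow counters leave a visible trace of
misbehaving hardware.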