X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fsched_clock.c;h=204991a0bfa7d707e6da0e5c27b593b81b42ddb8;hb=ddd13dc606ea1a06f2cf7d11dc06418de3e28121;hp=b96559cb96a52153e27513a4fb7940e0fbfae8af;hpb=50526968e99afbca34924abcb04658b6dd5c5ea5;p=linux-2.6

diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index b96559cb96..204991a0bf 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -42,6 +42,8 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 }
 
+static __read_mostly int sched_clock_running;
+
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 
 struct sched_clock_data {
@@ -53,7 +55,6 @@ struct sched_clock_data {
 	raw_spinlock_t		lock;
 
 	unsigned long		tick_jiffies;
-	u64			prev_raw;
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
@@ -71,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
 	return &per_cpu(sched_clock_data, cpu);
 }
 
-static __read_mostly int sched_clock_running;
-
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
@@ -84,7 +83,6 @@ void sched_clock_init(void)
 
 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		scd->tick_jiffies = now_jiffies;
-		scd->prev_raw = 0;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
@@ -99,13 +97,13 @@ void sched_clock_init(void)
  *  - filter out backward motion
  *  - use jiffies to generate a min,max window to clip the raw values
  */
-static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
+static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 {
 	unsigned long now_jiffies = jiffies;
 	long delta_jiffies = now_jiffies - scd->tick_jiffies;
 	u64 clock = scd->clock;
 	u64 min_clock, max_clock;
-	s64 delta = now - scd->prev_raw;
+	s64 delta = now - scd->tick_raw;
 
 	WARN_ON_ONCE(!irqs_disabled());
 	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
@@ -130,9 +128,10 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(clock < min_clock))
 		clock = min_clock;
 
-	scd->prev_raw = now;
 	scd->tick_jiffies = now_jiffies;
 	scd->clock = clock;
+
+	return clock;
 }
 
 static void lock_double_clock(struct sched_clock_data *data1,
@@ -150,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1,
 u64 sched_clock_cpu(int cpu)
 {
 	struct sched_clock_data *scd = cpu_sdc(cpu);
-	u64 now, clock;
+	u64 now, clock, this_clock, remote_clock;
 
 	if (unlikely(!sched_clock_running))
 		return 0ull;
@@ -159,27 +158,36 @@ u64 sched_clock_cpu(int cpu)
 	now = sched_clock();
 
 	if (cpu != raw_smp_processor_id()) {
-		/*
-		 * in order to update a remote cpu's clock based on our
-		 * unstable raw time rebase it against:
-		 *   tick_raw		(offset between raw counters)
-		 *   tick_gotd		(tick offset between cpus)
-		 */
 		struct sched_clock_data *my_scd = this_scd();
 
 		lock_double_clock(scd, my_scd);
 
-		now += scd->tick_raw - my_scd->tick_raw;
-		now += my_scd->tick_gtod - scd->tick_gtod;
+		this_clock = __update_sched_clock(my_scd, now);
+		remote_clock = scd->clock;
+
+		/*
+		 * Use the opportunity that we have both locks
+		 * taken to couple the two clocks: we take the
+		 * larger time as the latest time for both
+		 * runqueues. (this creates monotonic movement)
+		 */
+		if (likely(remote_clock < this_clock)) {
+			clock = this_clock;
+			scd->clock = clock;
+		} else {
+			/*
+			 * Should be rare, but possible:
+			 */
+			clock = remote_clock;
+			my_scd->clock = remote_clock;
+		}
 
 		__raw_spin_unlock(&my_scd->lock);
 	} else {
 		__raw_spin_lock(&scd->lock);
+		clock = __update_sched_clock(scd, now);
 	}
 
-	__update_sched_clock(scd, now);
-	clock = scd->clock;
-
 	__raw_spin_unlock(&scd->lock);
 
 	return clock;
@@ -225,7 +233,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 	struct sched_clock_data *scd = this_scd();
-	u64 now = sched_clock();
 
 	/*
 	 * Override the previous timestamp and ignore all
@@ -234,7 +241,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	 * rq clock:
 	 */
 	__raw_spin_lock(&scd->lock);
-	scd->prev_raw = now;
 	scd->clock += delta_ns;
 	__raw_spin_unlock(&scd->lock);
 
@@ -242,6 +248,21 @@
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
+void sched_clock_init(void)
+{
+	sched_clock_running = 1;
+}
+
+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
 #endif
 
 unsigned long long cpu_clock(int cpu)
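For readers skimming the patch, here is a minimal stand-alone sketch of the coupling rule described in the new sched_clock_cpu() comment: with both per-CPU locks held, the larger of the local and remote clock values becomes the new value for both, so neither clock is ever observed to move backwards. This is illustrative only, not the kernel code; the struct and function names below are invented for this example.

#include <stdint.h>

/* Hypothetical stand-ins for the two per-CPU clock values. */
struct clock_pair {
	uint64_t this_clock;	/* just-updated clock of the reading CPU  */
	uint64_t remote_clock;	/* last known clock of the CPU being read */
};

/*
 * Couple the two clocks, assuming the caller already holds both locks:
 * whichever clock is further ahead wins and the other is pulled forward
 * to match, which keeps every observed value monotonic.
 */
static uint64_t couple_clocks(struct clock_pair *p)
{
	if (p->remote_clock < p->this_clock)
		p->remote_clock = p->this_clock;	/* common case */
	else
		p->this_clock = p->remote_clock;	/* rare, but possible */

	return p->this_clock;
}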