From: Atsushi Nemoto Date: Sun, 1 Oct 2006 06:28:31 +0000 (-0700) Subject: [PATCH] kill wall_jiffies X-Git-Tag: v2.6.19-rc1~550 X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8ef386092d7c2891bd7acefb2a87f878f7e9a0d6;p=linux-2.6 [PATCH] kill wall_jiffies With 2.6.18-rc4-mm2, wall_jiffies is now always the same as jiffies, so we can kill wall_jiffies completely. This is just a cleanup and logically should not change any real behavior, except for one thing: the RTC updating code in (old) ppc and xtensa uses the condition "jiffies - wall_jiffies == 1". This condition is never met, so I suppose it is just a bug. I remove only that condition instead of killing the whole "if" block. [heiko.carstens@de.ibm.com: s390 build fix and cleanup] Signed-off-by: Atsushi Nemoto Cc: Andi Kleen Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Russell King Cc: Ian Molton Cc: Mikael Starvik Cc: David Howells Cc: Yoshinori Sato Cc: Hirokazu Takata Cc: Ralf Baechle Cc: Kyle McMartin Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Paul Mundt Cc: Kazumoto Kojima Cc: Richard Curnow Cc: William Lee Irwin III Cc: "David S. Miller" Cc: Jeff Dike Cc: Paolo 'Blaisorblade' Giarrusso Cc: Miles Bader Cc: Chris Zankel Cc: "Luck, Tony" Cc: Geert Uytterhoeven Cc: Roman Zippel Signed-off-by: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 7c1e44420a..581ddcc22f 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -54,8 +54,6 @@ #include "proto.h" #include "irq_impl.h" -extern unsigned long wall_jiffies; /* kernel/timer.c */ - static int set_rtc_mmss(unsigned long); DEFINE_SPINLOCK(rtc_lock); @@ -413,7 +411,7 @@ void do_gettimeofday(struct timeval *tv) { unsigned long flags; - unsigned long sec, usec, lost, seq; + unsigned long sec, usec, seq; unsigned long delta_cycles, delta_usec, partial_tick; do { @@ -423,14 +421,13 @@ do_gettimeofday(struct timeval *tv) sec = xtime.tv_sec; usec = (xtime.tv_nsec / 1000); partial_tick = state.partial_tick; - lost = jiffies - wall_jiffies; } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); #ifdef CONFIG_SMP /* Until and unless we figure out how to get cpu cycle counters in sync and keep them there, we can't use the rpcc tricks. */ - delta_usec = lost * (1000000 / HZ); + delta_usec = 0; #else /* * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks) @@ -446,8 +443,7 @@ do_gettimeofday(struct timeval *tv) */ delta_usec = (delta_cycles * state.scaled_ticks_per_cycle - + partial_tick - + (lost << FIX_SHIFT)) * 15625; + + partial_tick) * 15625; delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2; #endif @@ -480,12 +476,11 @@ do_settimeofday(struct timespec *tv) time. Without this, a full-tick error is possible. 
*/ #ifdef CONFIG_SMP - delta_nsec = (jiffies - wall_jiffies) * (NSEC_PER_SEC / HZ); + delta_nsec = 0; #else delta_nsec = rpcc() - state.last_time; delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle - + state.partial_tick - + ((jiffies - wall_jiffies) << FIX_SHIFT)) * 15625; + + state.partial_tick) * 15625; delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2; delta_nsec *= 1000; #endif diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index f7d5165796..b030320b17 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -37,8 +37,6 @@ */ struct sys_timer *system_timer; -extern unsigned long wall_jiffies; - /* this needs a better home */ DEFINE_SPINLOCK(rtc_lock); @@ -237,16 +235,11 @@ void do_gettimeofday(struct timeval *tv) { unsigned long flags; unsigned long seq; - unsigned long usec, sec, lost; + unsigned long usec, sec; do { seq = read_seqbegin_irqsave(&xtime_lock, flags); usec = system_timer->offset(); - - lost = jiffies - wall_jiffies; - if (lost) - usec += lost * USECS_PER_JIFFY; - sec = xtime.tv_sec; usec += xtime.tv_nsec / 1000; } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); @@ -279,7 +272,6 @@ int do_settimeofday(struct timespec *tv) * done, and then undo it! */ nsec -= system_timer->offset() * NSEC_PER_USEC; - nsec -= (jiffies - wall_jiffies) * TICK_NSEC; wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); diff --git a/arch/arm26/kernel/time.c b/arch/arm26/kernel/time.c index 80adbd005f..1206469b2b 100644 --- a/arch/arm26/kernel/time.c +++ b/arch/arm26/kernel/time.c @@ -33,8 +33,6 @@ #include #include -extern unsigned long wall_jiffies; - /* this needs a better home */ DEFINE_SPINLOCK(rtc_lock); @@ -136,16 +134,11 @@ void do_gettimeofday(struct timeval *tv) { unsigned long flags; unsigned long seq; - unsigned long usec, sec, lost; + unsigned long usec, sec; do { seq = read_seqbegin_irqsave(&xtime_lock, flags); usec = gettimeoffset(); - - lost = jiffies - wall_jiffies; - if (lost) - usec += lost * USECS_PER_JIFFY; - sec = xtime.tv_sec; usec += xtime.tv_nsec / 1000; } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); @@ -174,8 +167,7 @@ int do_settimeofday(struct timespec *tv) * wall time. Discover what correction gettimeofday() would have * done, and then undo it! */ - tv->tv_nsec -= 1000 * (gettimeoffset() + - (jiffies - wall_jiffies) * USECS_PER_JIFFY); + tv->tv_nsec -= 1000 * gettimeoffset(); while (tv->tv_nsec < 0) { tv->tv_nsec += NSEC_PER_SEC; diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c index 66ba8898db..0f9213cbd4 100644 --- a/arch/cris/kernel/time.c +++ b/arch/cris/kernel/time.c @@ -37,7 +37,6 @@ int have_rtc; /* used to remember if we have an RTC or not */; #define TICK_SIZE tick -extern unsigned long wall_jiffies; extern unsigned long loops_per_jiffy; /* init/main.c */ unsigned long loops_per_usec; @@ -58,11 +57,6 @@ void do_gettimeofday(struct timeval *tv) local_irq_save(flags); local_irq_disable(); usec = do_gettimeoffset(); - { - unsigned long lost = jiffies - wall_jiffies; - if (lost) - usec += lost * (1000000 / HZ); - } /* * If time_adjust is negative then NTP is slowing the clock @@ -103,7 +97,6 @@ int do_settimeofday(struct timespec *tv) * made, and then undo it! 
*/ nsec -= do_gettimeoffset() * NSEC_PER_USEC; - nsec -= (jiffies - wall_jiffies) * TICK_NSEC; wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c index 86944acfb6..58a2d55824 100644 --- a/arch/i386/kernel/time.c +++ b/arch/i386/kernel/time.c @@ -76,8 +76,6 @@ int pit_latch_buggy; /* extern */ unsigned int cpu_khz; /* Detected as we calibrate the TSC */ EXPORT_SYMBOL(cpu_khz); -extern unsigned long wall_jiffies; - DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); @@ -329,7 +327,6 @@ static int timer_resume(struct sys_device *dev) do_settimeofday(&ts); write_seqlock_irqsave(&xtime_lock, flags); jiffies_64 += sleep_length; - wall_jiffies += sleep_length; write_sequnlock_irqrestore(&xtime_lock, flags); touch_softlockup_watchdog(); return 0; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 16262687a1..62e07f906e 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -29,8 +29,6 @@ #include #include -extern unsigned long wall_jiffies; - volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */ #ifdef CONFIG_IA64_DEBUG_IRQ diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c index 7a896893cd..d8af155db9 100644 --- a/arch/m32r/kernel/time.c +++ b/arch/m32r/kernel/time.c @@ -38,7 +38,6 @@ extern void send_IPI_allbutself(int, int); extern void smp_local_timer_interrupt(struct pt_regs *); #endif -extern unsigned long wall_jiffies; #define TICK_SIZE (tick_nsec / 1000) /* @@ -108,24 +107,17 @@ void do_gettimeofday(struct timeval *tv) unsigned long max_ntp_tick = tick_usec - tickadj; do { - unsigned long lost; - seq = read_seqbegin(&xtime_lock); usec = do_gettimeoffset(); - lost = jiffies - wall_jiffies; /* * If time_adjust is negative then NTP is slowing the clock * so make sure not to go into next possible interval. * Better to lose some accuracy than have time go backwards.. */ - if (unlikely(time_adjust < 0)) { + if (unlikely(time_adjust < 0)) usec = min(usec, max_ntp_tick); - if (lost) - usec += lost * max_ntp_tick; - } else if (unlikely(lost)) - usec += lost * tick_usec; sec = xtime.tv_sec; usec += (xtime.tv_nsec / 1000); @@ -158,7 +150,6 @@ int do_settimeofday(struct timespec *tv) * made, and then undo it! */ nsec -= do_gettimeoffset() * NSEC_PER_USEC; - nsec -= (jiffies - wall_jiffies) * TICK_NSEC; wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 1072e4946a..6cfc984380 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -96,31 +96,23 @@ void time_init(void) void do_gettimeofday(struct timeval *tv) { unsigned long flags; - extern unsigned long wall_jiffies; unsigned long seq; - unsigned long usec, sec, lost; + unsigned long usec, sec; unsigned long max_ntp_tick = tick_usec - tickadj; do { seq = read_seqbegin_irqsave(&xtime_lock, flags); usec = mach_gettimeoffset(); - lost = jiffies - wall_jiffies; /* * If time_adjust is negative then NTP is slowing the clock * so make sure not to go into next possible interval. * Better to lose some accuracy than have time go backwards.. 
*/ - if (unlikely(time_adjust < 0)) { + if (unlikely(time_adjust < 0)) usec = min(usec, max_ntp_tick); - if (lost) - usec += lost * max_ntp_tick; - } - else if (unlikely(lost)) - usec += lost * tick_usec; - sec = xtime.tv_sec; usec += xtime.tv_nsec/1000; } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); @@ -141,7 +133,6 @@ int do_settimeofday(struct timespec *tv) { time_t wtm_sec, sec = tv->tv_sec; long wtm_nsec, nsec = tv->tv_nsec; - extern unsigned long wall_jiffies; if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; @@ -153,8 +144,7 @@ int do_settimeofday(struct timespec *tv) * Discover what correction gettimeofday * would have done, and then undo it! */ - nsec -= 1000 * (mach_gettimeoffset() + - (jiffies - wall_jiffies) * (1000000 / HZ)); + nsec -= 1000 * mach_gettimeoffset(); wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c index db1e1ce0a3..c5667bdddd 100644 --- a/arch/m68knommu/kernel/time.c +++ b/arch/m68knommu/kernel/time.c @@ -26,8 +26,6 @@ #define TICK_SIZE (tick_nsec / 1000) -extern unsigned long wall_jiffies; - static inline int set_rtc_mmss(unsigned long nowtime) { @@ -124,15 +122,12 @@ void time_init(void) void do_gettimeofday(struct timeval *tv) { unsigned long flags; - unsigned long lost, seq; + unsigned long seq; unsigned long usec, sec; do { seq = read_seqbegin_irqsave(&xtime_lock, flags); usec = mach_gettimeoffset ? mach_gettimeoffset() : 0; - lost = jiffies - wall_jiffies; - if (lost) - usec += lost * (1000000 / HZ); sec = xtime.tv_sec; usec += (xtime.tv_nsec / 1000); } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 6ab8d975a9..845c7e5550 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -47,8 +47,6 @@ /* * forward reference */ -extern volatile unsigned long wall_jiffies; - DEFINE_SPINLOCK(rtc_lock); /* @@ -159,7 +157,6 @@ void (*mips_hpt_init)(unsigned int); void do_gettimeofday(struct timeval *tv) { unsigned long seq; - unsigned long lost; unsigned long usec, sec; unsigned long max_ntp_tick; @@ -168,8 +165,6 @@ void do_gettimeofday(struct timeval *tv) usec = do_gettimeoffset(); - lost = jiffies - wall_jiffies; - /* * If time_adjust is negative then NTP is slowing the clock * so make sure not to go into next possible interval. @@ -178,11 +173,7 @@ void do_gettimeofday(struct timeval *tv) if (unlikely(time_adjust < 0)) { max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj; usec = min(usec, max_ntp_tick); - - if (lost) - usec += lost * max_ntp_tick; - } else if (unlikely(lost)) - usec += lost * (USEC_PER_SEC / HZ); + } sec = xtime.tv_sec; usec += (xtime.tv_nsec / 1000); @@ -217,7 +208,6 @@ int do_settimeofday(struct timespec *tv) * made, and then undo it! 
*/ nsec -= do_gettimeoffset() * NSEC_PER_USEC; - nsec -= (jiffies - wall_jiffies) * tick_nsec; wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index c62a3a9ef8..257ce118e3 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -42,8 +42,6 @@ static unsigned long ct_cur[NR_CPUS]; /* What counter should be at next timer irq */ static long last_rtc_update; /* Last time the rtc clock got updated */ -extern volatile unsigned long wall_jiffies; - #if 0 static int set_rtc_mmss(unsigned long nowtime) { diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 700df10924..ab641d67f5 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -32,9 +32,6 @@ #include -/* xtime and wall_jiffies keep wall-clock time */ -extern unsigned long wall_jiffies; - static long clocktick __read_mostly; /* timer cycles per tick */ static long halftick __read_mostly; @@ -112,7 +109,7 @@ EXPORT_SYMBOL(profile_pc); /*** converted from ia64 ***/ /* * Return the number of micro-seconds that elapsed since the last - * update to wall time (aka xtime aka wall_jiffies). The xtime_lock + * update to wall time (aka xtime). The xtime_lock * must be at least read-locked when calling this routine. */ static inline unsigned long diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 71f71da98e..8b278d85ca 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -117,8 +117,6 @@ unsigned tb_to_ns_shift; struct gettimeofday_struct do_gtod; -extern unsigned long wall_jiffies; - extern struct timezone sys_tz; static long timezone_offset; @@ -816,11 +814,6 @@ int do_settimeofday(struct timespec *tv) /* * Subtract off the number of nanoseconds since the * beginning of the last tick. - * Note that since we don't increment jiffies_64 anywhere other - * than in do_timer (since we don't have a lost tick problem), - * wall_jiffies will always be the same as jiffies, - * and therefore the (jiffies - wall_jiffies) computation - * has been removed. 
*/ tb_delta = tb_ticks_since(tb_last_jiffy); tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */ diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c index 1e1f315547..187388625a 100644 --- a/arch/ppc/kernel/time.c +++ b/arch/ppc/kernel/time.c @@ -80,8 +80,6 @@ unsigned tb_to_us; unsigned tb_last_stamp; unsigned long tb_to_ns_scale; -extern unsigned long wall_jiffies; - /* used for timezone offset */ static long timezone_offset; @@ -173,8 +171,7 @@ void timer_interrupt(struct pt_regs * regs) */ if ( ppc_md.set_rtc_time && ntp_synced() && xtime.tv_sec - last_rtc_update >= 659 && - abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ && - jiffies - wall_jiffies == 1) { + abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ) { if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0) last_rtc_update = xtime.tv_sec+1; else @@ -200,7 +197,7 @@ void do_gettimeofday(struct timeval *tv) { unsigned long flags; unsigned long seq; - unsigned delta, lost_ticks, usec, sec; + unsigned delta, usec, sec; do { seq = read_seqbegin_irqsave(&xtime_lock, flags); @@ -214,10 +211,9 @@ void do_gettimeofday(struct timeval *tv) if (!smp_tb_synchronized) delta = 0; #endif /* CONFIG_SMP */ - lost_ticks = jiffies - wall_jiffies; } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - usec += mulhwu(tb_to_us, tb_ticks_per_jiffy * lost_ticks + delta); + usec += mulhwu(tb_to_us, delta); while (usec >= 1000000) { sec++; usec -= 1000000; @@ -258,7 +254,6 @@ int do_settimeofday(struct timespec *tv) * still reasonable when gettimeofday resolution is 1 jiffy. */ tb_delta = tb_ticks_since(last_jiffy_stamp(smp_processor_id())); - tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta); diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index abab42e9f5..4bf66cc4a2 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -53,8 +53,6 @@ static u64 init_timer_cc; static u64 jiffies_timer_cc; static u64 xtime_cc; -extern unsigned long wall_jiffies; - /* * Scheduler clock - returns current time in nanosec units. */ @@ -87,9 +85,8 @@ static inline unsigned long do_gettimeoffset(void) { __u64 now; - now = (get_clock() - jiffies_timer_cc) >> 12; - /* We require the offset from the latest update of xtime */ - now -= (__u64) wall_jiffies*USECS_PER_JIFFY; + now = (get_clock() - jiffies_timer_cc) >> 12; + now -= (__u64) jiffies * USECS_PER_JIFFY; return (unsigned long) now; } diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index f664a196c4..450c68f1df 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -18,7 +18,6 @@ #include #include -extern unsigned long wall_jiffies; struct sys_timer *sys_timer; /* Move this somewhere more sensible.. */ @@ -52,16 +51,10 @@ void do_gettimeofday(struct timeval *tv) { unsigned long seq; unsigned long usec, sec; - unsigned long lost; do { seq = read_seqbegin(&xtime_lock); usec = get_timer_offset(); - - lost = jiffies - wall_jiffies; - if (lost) - usec += lost * (1000000 / HZ); - sec = xtime.tv_sec; usec += xtime.tv_nsec / 1000; } while (read_seqretry(&xtime_lock, seq)); @@ -91,8 +84,7 @@ int do_settimeofday(struct timespec *tv) * wall time. Discover what correction gettimeofday() would have * made, and then undo it! 
*/ - nsec -= 1000 * (get_timer_offset() + - (jiffies - wall_jiffies) * (1000000 / HZ)); + nsec -= 1000 * get_timer_offset(); wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c index 3b61e06f9d..9c4a38a869 100644 --- a/arch/sh64/kernel/time.c +++ b/arch/sh64/kernel/time.c @@ -107,8 +107,6 @@ #define TICK_SIZE (tick_nsec / 1000) -extern unsigned long wall_jiffies; - static unsigned long tmu_base, rtc_base; unsigned long cprc_base; @@ -194,13 +192,6 @@ void do_gettimeofday(struct timeval *tv) do { seq = read_seqbegin_irqsave(&xtime_lock, flags); usec = usecs_since_tick(); - { - unsigned long lost = jiffies - wall_jiffies; - - if (lost) - usec += lost * (1000000 / HZ); - } - sec = xtime.tv_sec; usec += xtime.tv_nsec / 1000; } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); @@ -229,8 +220,7 @@ int do_settimeofday(struct timespec *tv) * wall time. Discover what correction gettimeofday() would have * made, and then undo it! */ - nsec -= 1000 * (usecs_since_tick() + - (jiffies - wall_jiffies) * (1000000 / HZ)); + nsec -= 1000 * usecs_since_tick(); wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index e19b1bad9b..edb6cc665f 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -765,8 +765,6 @@ static __inline__ unsigned long do_gettimeoffset(void) return count; } -extern unsigned long wall_jiffies; - static void pci_do_gettimeofday(struct timeval *tv) { unsigned long flags; @@ -775,26 +773,17 @@ static void pci_do_gettimeofday(struct timeval *tv) unsigned long max_ntp_tick = tick_usec - tickadj; do { - unsigned long lost; - seq = read_seqbegin_irqsave(&xtime_lock, flags); usec = do_gettimeoffset(); - lost = jiffies - wall_jiffies; /* * If time_adjust is negative then NTP is slowing the clock * so make sure not to go into next possible interval. * Better to lose some accuracy than have time go backwards.. */ - if (unlikely(time_adjust < 0)) { + if (unlikely(time_adjust < 0)) usec = min(usec, max_ntp_tick); - if (lost) - usec += lost * max_ntp_tick; - } - else if (unlikely(lost)) - usec += lost * tick_usec; - sec = xtime.tv_sec; usec += (xtime.tv_nsec / 1000); } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); @@ -819,8 +808,7 @@ static int pci_do_settimeofday(struct timespec *tv) * wall time. Discover what correction gettimeofday() would have * made, and then undo it! */ - tv->tv_nsec -= 1000 * (do_gettimeoffset() + - (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ)); + tv->tv_nsec -= 1000 * do_gettimeoffset(); while (tv->tv_nsec < 0) { tv->tv_nsec += NSEC_PER_SEC; tv->tv_sec--; diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c index 6f84fa1b58..e10dc83194 100644 --- a/arch/sparc/kernel/time.c +++ b/arch/sparc/kernel/time.c @@ -43,8 +43,6 @@ #include #include -extern unsigned long wall_jiffies; - DEFINE_SPINLOCK(rtc_lock); enum sparc_clock_type sp_clock_typ; DEFINE_SPINLOCK(mostek_lock); @@ -449,7 +447,7 @@ unsigned long long sched_clock(void) /* Ok, my cute asm atomicity trick doesn't work anymore. * There are just too many variables that need to be protected - * now (both members of xtime, wall_jiffies, et al.) + * now (both members of xtime, et al.) 
*/ void do_gettimeofday(struct timeval *tv) { @@ -459,26 +457,17 @@ void do_gettimeofday(struct timeval *tv) unsigned long max_ntp_tick = tick_usec - tickadj; do { - unsigned long lost; - seq = read_seqbegin_irqsave(&xtime_lock, flags); usec = do_gettimeoffset(); - lost = jiffies - wall_jiffies; /* * If time_adjust is negative then NTP is slowing the clock * so make sure not to go into next possible interval. * Better to lose some accuracy than have time go backwards.. */ - if (unlikely(time_adjust < 0)) { + if (unlikely(time_adjust < 0)) usec = min(usec, max_ntp_tick); - if (lost) - usec += lost * max_ntp_tick; - } - else if (unlikely(lost)) - usec += lost * tick_usec; - sec = xtime.tv_sec; usec += (xtime.tv_nsec / 1000); } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); @@ -521,8 +510,7 @@ static int sbus_do_settimeofday(struct timespec *tv) * wall time. Discover what correction gettimeofday() would have * made, and then undo it! */ - nsec -= 1000 * (do_gettimeoffset() + - (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ)); + nsec -= 1000 * do_gettimeoffset(); wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index ca1193482f..00f6fc4aaa 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -53,8 +53,6 @@ void __iomem *mstk48t02_regs = NULL; unsigned long ds1287_regs = 0UL; #endif -extern unsigned long wall_jiffies; - static void __iomem *mstk48t08_regs; static void __iomem *mstk48t59_regs; diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 7ea3bf2a85..557e92af7b 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -77,7 +77,6 @@ unsigned long long monotonic_base; struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */ volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; -unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES; struct timespec __xtime __section_xtime; struct timezone __sys_tz __section_sys_tz; @@ -119,7 +118,7 @@ unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc; void do_gettimeofday(struct timeval *tv) { - unsigned long seq, t; + unsigned long seq; unsigned int sec, usec; do { @@ -136,10 +135,7 @@ void do_gettimeofday(struct timeval *tv) be found. Note when you fix it here you need to do the same in arch/x86_64/kernel/vsyscall.c and export all needed variables in vmlinux.lds. 
-AK */ - - t = (jiffies - wall_jiffies) * USEC_PER_TICK + - do_gettimeoffset(); - usec += t; + usec += do_gettimeoffset(); } while (read_seqretry(&xtime_lock, seq)); @@ -165,8 +161,7 @@ int do_settimeofday(struct timespec *tv) write_seqlock_irq(&xtime_lock); - nsec -= do_gettimeoffset() * NSEC_PER_USEC + - (jiffies - wall_jiffies) * NSEC_PER_TICK; + nsec -= do_gettimeoffset() * NSEC_PER_USEC; wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); @@ -1071,7 +1066,6 @@ static int timer_resume(struct sys_device *dev) vxtime.last_tsc = get_cycles_sync(); write_sequnlock_irqrestore(&xtime_lock,flags); jiffies += sleep_length; - wall_jiffies += sleep_length; monotonic_base += sleep_length * (NSEC_PER_SEC/HZ); touch_softlockup_watchdog(); return 0; diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S index f8aeccf105..b9df2ab652 100644 --- a/arch/x86_64/kernel/vmlinux.lds.S +++ b/arch/x86_64/kernel/vmlinux.lds.S @@ -101,9 +101,6 @@ SECTIONS .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) } vgetcpu_mode = VVIRT(.vgetcpu_mode); - .wall_jiffies : AT(VLOAD(.wall_jiffies)) { *(.wall_jiffies) } - wall_jiffies = VVIRT(.wall_jiffies); - .sys_tz : AT(VLOAD(.sys_tz)) { *(.sys_tz) } sys_tz = VVIRT(.sys_tz); diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c index 07c0863820..a98b460af6 100644 --- a/arch/x86_64/kernel/vsyscall.c +++ b/arch/x86_64/kernel/vsyscall.c @@ -66,8 +66,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv) sequence = read_seqbegin(&__xtime_lock); sec = __xtime.tv_sec; - usec = (__xtime.tv_nsec / 1000) + - (__jiffies - __wall_jiffies) * (1000000 / HZ); + usec = __xtime.tv_nsec / 1000; if (__vxtime.mode != VXTIME_HPET) { t = get_cycles_sync(); diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 241db201f4..37347e3699 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -26,8 +26,6 @@ #include -extern volatile unsigned long wall_jiffies; - DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); @@ -110,7 +108,6 @@ int do_settimeofday(struct timespec *tv) */ ccount = get_ccount(); nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC; - nsec -= (jiffies - wall_jiffies) * CCOUNT_PER_JIFFY * CCOUNT_NSEC; wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); @@ -129,7 +126,7 @@ EXPORT_SYMBOL(do_settimeofday); void do_gettimeofday(struct timeval *tv) { unsigned long flags; - unsigned long sec, usec, delta, lost, seq; + unsigned long sec, usec, delta, seq; do { seq = read_seqbegin_irqsave(&xtime_lock, flags); @@ -137,12 +134,9 @@ void do_gettimeofday(struct timeval *tv) delta = get_ccount() - last_ccount_stamp; sec = xtime.tv_sec; usec = (xtime.tv_nsec / NSEC_PER_USEC); - - lost = jiffies - wall_jiffies; - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - usec += lost * (1000000UL/HZ) + (delta * CCOUNT_NSEC) / NSEC_PER_USEC; + usec += (delta * CCOUNT_NSEC) / NSEC_PER_USEC; for (; usec >= 1000000; sec++, usec -= 1000000) ; @@ -179,8 +173,7 @@ again: if (ntp_synced() && xtime.tv_sec - last_rtc_update >= 659 && - abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ && - jiffies - wall_jiffies == 1) { + abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ) { if (platform_set_rtc_time(xtime.tv_sec+1) == 0) last_rtc_update = xtime.tv_sec+1; diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h index 
2281e9399b..fd452fc2c0 100644 --- a/include/asm-x86_64/vsyscall.h +++ b/include/asm-x86_64/vsyscall.h @@ -17,7 +17,6 @@ enum vsyscall_num { #define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16))) #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16))) -#define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16))) #define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16))) #define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16))) #define __section_sysctl_vsyscall __attribute__ ((unused, __section__ (".sysctl_vsyscall"), aligned(16))) @@ -48,14 +47,12 @@ extern struct vxtime_data __vxtime; extern int __vgetcpu_mode; extern struct timespec __xtime; extern volatile unsigned long __jiffies; -extern unsigned long __wall_jiffies; extern struct timezone __sys_tz; extern seqlock_t __xtime_lock; /* kernel space (writeable) */ extern struct vxtime_data vxtime; extern int vgetcpu_mode; -extern unsigned long wall_jiffies; extern struct timezone sys_tz; extern int sysctl_vsyscall; extern seqlock_t xtime_lock; diff --git a/kernel/timer.c b/kernel/timer.c index 654dd5df24..c1c7fbcffe 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -768,7 +768,7 @@ static int timekeeping_suspended; * @dev: unused * * This is for the generic clocksource timekeeping. - * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are + * xtime/wall_to_monotonic/jiffies/etc are * still managed by arch specific suspend/resume code. */ static int timekeeping_resume(struct sys_device *dev) @@ -1016,9 +1016,6 @@ static inline void calc_load(unsigned long ticks) } } -/* jiffies at the most recent update of wall time */ -unsigned long wall_jiffies = INITIAL_JIFFIES; - /* * This read-write spinlock protects us from races in SMP while * playing with xtime and avenrun. @@ -1056,7 +1053,6 @@ void run_local_timers(void) */ static inline void update_times(unsigned long ticks) { - wall_jiffies += ticks; update_wall_time(); calc_load(ticks); }
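
For context, here is a minimal, self-contained userspace sketch of the lost-tick correction that this patch removes from each architecture's do_gettimeofday(). It is not taken from the patch; HZ, the counter values, and gettimeoffset() below are illustrative stand-ins, not kernel code. Because wall_jiffies now always equals jiffies, the "lost" term is always zero and the correction collapses to the plain sub-tick offset; the same arithmetic is why the "jiffies - wall_jiffies == 1" test dropped from the old ppc and xtensa RTC-update paths could never be true.

#include <stdio.h>

#define HZ 100                       /* illustrative tick rate, not a real config value */
#define USEC_PER_SEC 1000000UL

/* Stand-ins for the kernel's tick counters; after 2.6.18-rc4-mm2 these
 * are always equal, which is what makes wall_jiffies removable. */
static unsigned long jiffies      = 123456UL;
static unsigned long wall_jiffies = 123456UL;

/* Stand-in for the per-arch sub-tick offset in microseconds. */
static unsigned long gettimeoffset(void)
{
	return 1234UL;
}

int main(void)
{
	/* Old pattern: add a correction for ticks that jiffies has seen
	 * but that have not yet been folded into xtime. */
	unsigned long lost = jiffies - wall_jiffies;
	unsigned long usec_old = gettimeoffset() + lost * (USEC_PER_SEC / HZ);

	/* New pattern after this patch: no lost-tick term at all. */
	unsigned long usec_new = gettimeoffset();

	printf("lost = %lu, old usec = %lu, new usec = %lu\n",
	       lost, usec_old, usec_new);
	return 0;
}

With the two counters always equal, lost is 0 and both computations yield the same offset, which is why dropping the term is a pure cleanup rather than a behavior change.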