tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
static inline void set_running_timer(tvec_base_t *base,
struct timer_list *timer)
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
+ cpu_relax();
}
}
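For context: try_to_del_timer_sync() returns -1 while the timer's handler is still running on another CPU, so del_timer_sync() has to retry, and the added cpu_relax() makes that busy-wait cheaper on SMT siblings. A sketch of the whole loop as it reads after this change (reconstructed; assumed to match the kernel's del_timer_sync()):

	int del_timer_sync(struct timer_list *timer)
	{
		for (;;) {
			int ret = try_to_del_timer_sync(timer);
			if (ret >= 0)
				return ret;	/* 0: was not pending, 1: deactivated */
			cpu_relax();		/* handler running elsewhere; spin politely */
		}
	}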
* This function cascades all vectors and executes all expired timer
* vectors.
*/
-#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK
+#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
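Both new parentheses close real expansion hazards; a sketch of the failure modes with the old definition (hypothetical uses, not from the patch):

	/* Old: (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK */
	idx = INDEX(lvl + 1);	/* TVR_BITS + lvl + 1 * TVN_BITS: '*' binds to the 1 only */
	x   = INDEX(0) + 1;	/* '+' binds tighter than '&': (...) & (TVN_MASK + 1) */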
static inline void __run_timers(tvec_base_t *base)
{
- * specified number of bits to the right of the binary point.
+ * fixed TICK_LENGTH_SHIFT bits to the right of the binary point.
* This function has no side-effects.
*/
-u64 current_tick_length(long shift)
+u64 current_tick_length(void)
{
long delta_nsec;
u64 ret;
* ie: nanosecond value shifted by (SHIFT_SCALE - 10)
*/
delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
- ret = ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
-
- /* convert from (SHIFT_SCALE - 10) to specified shift scale: */
- shift = shift - (SHIFT_SCALE - 10);
- if (shift < 0)
- ret >>= -shift;
- else
- ret <<= shift;
+ ret = (u64)delta_nsec << TICK_LENGTH_SHIFT;
+ ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10));
return ret;
}
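With the shift argument gone, the result is always fixed-point nanoseconds carrying TICK_LENGTH_SHIFT fractional bits, and each caller rescales it. Two sketches matching the consumers later in this patch:

	/* whole nanoseconds per tick: */
	u64 nsec = current_tick_length() >> TICK_LENGTH_SHIFT;

	/* clock-shifted nanoseconds (halved), as clocksource_bigadjust() computes tick_error: */
	s64 terr = current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);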
/* XXX - all of this timekeeping code should be later moved to time.c */
#include <linux/clocksource.h>
static struct clocksource *clock; /* pointer to current clocksource */
-static cycle_t last_clock_cycle; /* cycle value at last update_wall_time */
#ifdef CONFIG_GENERIC_TIME
/**
s64 ns_offset;
/* read clocksource: */
- cycle_now = read_clocksource(clock);
+ cycle_now = clocksource_read(clock);
/* calculate the delta since the last update_wall_time: */
- cycle_delta = (cycle_now - last_clock_cycle) & clock->mask;
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
/* convert to nanoseconds: */
ns_offset = cyc2ns(clock, cycle_delta);
}
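The mask handles counter wraparound, and cyc2ns() then scales the delta by the clocksource's mult/shift pair. A minimal sketch of the conversion cyc2ns() is assumed to perform, using only fields this patch already relies on:

	static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
	{
		u64 ret = (u64)cycles;

		ret = (ret * cs->mult) >> cs->shift;	/* ns = cycles * mult / 2^shift */
		return ret;
	}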
/**
- * get_realtime_clock_ts - Returns the time of day in a timespec
+ * getnstimeofday - Returns the time of day in a timespec
* @ts: pointer to the timespec to be set
*
* Returns the time of day in a timespec.
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+ clock->error = 0;
ntp_clear();
write_sequnlock_irqrestore(&xtime_lock, flags);
struct clocksource *new;
cycle_t now;
u64 nsec;
- new = get_next_clocksource();
+ new = clocksource_get_next();
if (clock != new) {
- now = read_clocksource(new);
+ now = clocksource_read(new);
nsec = __get_nsec_offset();
timespec_add_ns(&xtime, nsec);
clock = new;
- last_clock_cycle = now;
+ clock->cycle_last = now;
printk(KERN_INFO "Time: %s clocksource has been installed.\n",
clock->name);
return 1;
unsigned long flags;
write_seqlock_irqsave(&xtime_lock, flags);
- clock = get_next_clocksource();
- calculate_clocksource_interval(clock, tick_nsec);
- last_clock_cycle = read_clocksource(clock);
+ clock = clocksource_get_next();
+ clocksource_calculate_interval(clock, tick_nsec);
+ clock->cycle_last = clocksource_read(clock);
ntp_clear();
write_sequnlock_irqrestore(&xtime_lock, flags);
}
+static int timekeeping_suspended;
/*
* timekeeping_resume - Resumes the generic timekeeping subsystem.
* @dev: unused
write_seqlock_irqsave(&xtime_lock, flags);
/* restart the last cycle value */
- last_clock_cycle = read_clocksource(clock);
+ clock->cycle_last = clocksource_read(clock);
+ clock->error = 0;
+ timekeeping_suspended = 0;
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ return 0;
+}
+
+static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
+{
+ unsigned long flags;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+ timekeeping_suspended = 1;
write_sequnlock_irqrestore(&xtime_lock, flags);
return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
.resume = timekeeping_resume,
+ .suspend = timekeeping_suspend,
set_kset_name("timekeeping"),
};
device_initcall(timekeeping_init_device);
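timekeeping_init_device() is outside this excerpt; it is assumed to be the usual sysdev boilerplate that registers the class above plus a single device so the new suspend/resume hooks actually run. A sketch:

	static struct sys_device device_timer = {
		.id	= 0,
		.cls	= &timekeeping_sysclass,
	};

	static int __init timekeeping_init_device(void)
	{
		int error = sysdev_class_register(&timekeeping_sysclass);

		if (!error)
			error = sysdev_register(&device_timer);
		return error;
	}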
+/*
+ * If the error is already larger, we look ahead even further
+ * to compensate for late or lost adjustments.
+ */
+static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *offset)
+{
+ s64 tick_error, i;
+ u32 look_ahead, adj;
+ s32 error2, mult;
+
+ /*
+ * Use the current error value to determine how much to look ahead.
+ * The larger the error, the slower we adjust for it, to avoid problems
+ * with losing too many ticks; otherwise we would overadjust and
+ * produce an even larger error. The smaller the adjustment, the
+ * faster we try to adjust for it, as lost ticks can do less harm
+ * here. This is tuned so that an error of about 1 msec is adjusted
+ * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
+ */
+ error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
+ error2 = abs(error2);
+ for (look_ahead = 0; error2 > 0; look_ahead++)
+ error2 >>= 2;
+
+ /*
+ * Now calculate the error in (1 << look_ahead) ticks, but first
+ * remove the single look ahead already included in the error.
+ */
+ tick_error = current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
+ tick_error -= clock->xtime_interval >> 1;
+ error = ((error - tick_error) >> look_ahead) + tick_error;
+
+ /* Finally calculate the adjustment shift value. */
+ i = *interval;
+ mult = 1;
+ if (error < 0) {
+ error = -error;
+ *interval = -*interval;
+ *offset = -*offset;
+ mult = -1;
+ }
+ for (adj = 0; error > i; adj++)
+ error >>= 1;
+
+ *interval <<= adj;
+ *offset <<= adj;
+ return mult << adj;
+}
+
+/*
+ * Adjust the multiplier to reduce the error value,
+ * this is optimized for the most common adjustments of -1,0,1,
+ * for other values we can do a bit more work.
+ */
+static void clocksource_adjust(struct clocksource *clock, s64 offset)
+{
+ s64 error, interval = clock->cycle_interval;
+ int adj;
+
+ error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
+ if (error > interval) {
+ error >>= 2;
+ if (likely(error <= interval))
+ adj = 1;
+ else
+ adj = clocksource_bigadjust(error, &interval, &offset);
+ } else if (error < -interval) {
+ error >>= 2;
+ if (likely(error >= -interval)) {
+ adj = -1;
+ interval = -interval;
+ offset = -offset;
+ } else
+ adj = clocksource_bigadjust(error, &interval, &offset);
+ } else
+ return;
+
+ clock->mult += adj;
+ clock->xtime_interval += interval;
+ clock->xtime_nsec -= offset;
+ clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift);
+}
+
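For a sense of scale: one step of ->mult changes the time accumulated per interval by ->cycle_interval shifted nanoseconds, which is exactly what the interval/offset corrections above compensate for. Back-of-envelope, with illustrative numbers (not from the patch):

	/* e.g. cycle_interval = 10^6 cycles, clock->shift = 22:
	 *   mult += 1  =>  xtime_interval += 10^6 shifted-ns per interval
	 *              =>  roughly 10^6 / 2^22 ~= 0.24 ns of extra wall time per tick
	 */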
/*
* update_wall_time - Uses the current clocksource to increment the wall time
*
*/
static void update_wall_time(void)
{
- static s64 remainder_snsecs, error;
- s64 snsecs_per_sec;
- cycle_t now, offset;
+ cycle_t offset;
- snsecs_per_sec = (s64)NSEC_PER_SEC << clock->shift;
- remainder_snsecs += (s64)xtime.tv_nsec << clock->shift;
+ /* Make sure we're fully resumed: */
+ if (unlikely(timekeeping_suspended))
+ return;
- now = read_clocksource(clock);
- offset = (now - last_clock_cycle)&clock->mask;
+#ifdef CONFIG_GENERIC_TIME
+ offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
+#else
+ offset = clock->cycle_interval;
+#endif
+ clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
/* Normally this loop will run just once; however, in the
 * case of lost or late ticks, it will accumulate correctly.
*/
- while (offset > clock->interval_cycles) {
- /* get the ntp interval in clock shifted nanoseconds */
- s64 ntp_snsecs = current_tick_length(clock->shift);
-
+ while (offset >= clock->cycle_interval) {
/* accumulate one interval */
- remainder_snsecs += clock->interval_snsecs;
- last_clock_cycle += clock->interval_cycles;
- offset -= clock->interval_cycles;
+ clock->xtime_nsec += clock->xtime_interval;
+ clock->cycle_last += clock->cycle_interval;
+ offset -= clock->cycle_interval;
+
+ if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
+ clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
+ xtime.tv_sec++;
+ second_overflow();
+ }
/* interpolator bits */
- time_interpolator_update(clock->interval_snsecs
+ time_interpolator_update(clock->xtime_interval
>> clock->shift);
/* increment the NTP state machine */
update_ntp_one_tick();
/* accumulate error between NTP and clock interval */
- error += (ntp_snsecs - (s64)clock->interval_snsecs);
+ clock->error += current_tick_length();
+ clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
+ }
- /* correct the clock when NTP error is too big */
- remainder_snsecs += make_ntp_adj(clock, offset, &error);
+ /* correct the clock when NTP error is too big */
+ clocksource_adjust(clock, offset);
- if (remainder_snsecs >= snsecs_per_sec) {
- remainder_snsecs -= snsecs_per_sec;
- xtime.tv_sec++;
- second_overflow();
- }
- }
/* store full nanoseconds into xtime */
- xtime.tv_nsec = remainder_snsecs >> clock->shift;
- remainder_snsecs -= (s64)xtime.tv_nsec << clock->shift;
+ xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
+ clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
/* check to see if there is a new clocksource to use */
if (change_clocksource()) {
- error = 0;
- remainder_snsecs = 0;
- calculate_clocksource_interval(clock, tick_nsec);
+ clock->error = 0;
+ clock->xtime_nsec = 0;
+ clocksource_calculate_interval(clock, tick_nsec);
}
}
* playing with xtime and avenrun.
*/
#ifndef ARCH_HAVE_XTIME_LOCK
-seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
EXPORT_SYMBOL(xtime_lock);
#endif
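DEFINE_SEQLOCK() is just the newer initializer spelling; readers are unaffected. For reference, a minimal reader sketch using the standard seqlock API that consumers of xtime follow:

	struct timespec ts;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		ts = xtime;	/* snapshot is consistent once read_seqretry() is clean */
	} while (read_seqretry(&xtime_lock, seq));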
}
/*
- * Accessing ->group_leader->real_parent is not SMP-safe, it could
- * change from under us. However, rather than getting any lock
- * we can use an optimistic algorithm: get the parent
- * pid, and go back and check that the parent is still
- * the same. If it has changed (which is extremely unlikely
- * indeed), we just try again..
- *
- * NOTE! This depends on the fact that even if we _do_
- * get an old value of "parent", we can happily dereference
- * the pointer (it was and remains a dereferencable kernel pointer
- * no matter what): we just can't necessarily trust the result
- * until we know that the parent pointer is valid.
- *
- * NOTE2: ->group_leader never changes from under us.
+ * Accessing ->real_parent is not SMP-safe, it could
+ * change from under us. However, we can use a stale
+ * value of ->real_parent under rcu_read_lock(), see
+ * release_task()->call_rcu(delayed_put_task_struct).
*/
asmlinkage long sys_getppid(void)
{
int pid;
- struct task_struct *me = current;
- struct task_struct *parent;
- parent = me->group_leader->real_parent;
- for (;;) {
- pid = parent->tgid;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-{
- struct task_struct *old = parent;
+ rcu_read_lock();
+ pid = rcu_dereference(current->real_parent)->tgid;
+ rcu_read_unlock();
- /*
- * Make sure we read the pid before re-reading the
- * parent pointer:
- */
- smp_rmb();
- parent = me->group_leader->real_parent;
- if (old != parent)
- continue;
-}
-#endif
- break;
- }
return pid;
}
static void process_timeout(unsigned long __data)
{
- wake_up_process((task_t *)__data);
+ wake_up_process((struct task_struct *)__data);
}
/**
return 0;
}
+/*
+ * lockdep: we want to track each per-CPU base as a separate lock-class,
+ * but timer-bases are kmalloc()-ed, so we need to attach separate
+ * keys to them:
+ */
+static struct lock_class_key base_lock_keys[NR_CPUS];
+
static int __devinit init_timers_cpu(int cpu)
{
int j;
}
spin_lock_init(&base->lock);
+ lockdep_set_class(&base->lock, base_lock_keys + cpu);
+
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
INIT_LIST_HEAD(base->tv4.vec + j);
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int timer_cpu_notify(struct notifier_block *self,
+static int __cpuinit timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
return NOTIFY_OK;
}
-static struct notifier_block timers_nb = {
+static struct notifier_block __cpuinitdata timers_nb = {
.notifier_call = timer_cpu_notify,
};