} ____cacheline_aligned_in_smp;
typedef struct tvec_t_base_s tvec_base_t;
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
+
tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
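The static initialiser is the point of this hunk: every CPU's copy of the per-CPU pointer starts out as &boot_tvec_bases, so timer->base is never NULL even before init_timers_cpu() has run for that CPU. The idiom in miniature (a sketch with illustrative names, not part of the patch):

	struct foo { int dummy; };
	static struct foo boot_foo;
	/* each CPU's copy is initialised to &boot_foo */
	static DEFINE_PER_CPU(struct foo *, foo_ptr) = { &boot_foo };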
static inline void set_running_timer(tvec_base_t *base,
struct timer_list *timer)
void fastcall init_timer(struct timer_list *timer)
{
timer->entry.next = NULL;
- timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
+ timer->base = __raw_get_cpu_var(tvec_bases);
}
EXPORT_SYMBOL(init_timer);
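__raw_get_cpu_var() fetches the calling CPU's copy without the debug check that a plain smp_processor_id() lookup would trigger in preemptible context. That is safe here because any CPU's base pointer will do: the timer is only being initialised, not queued. For contrast, the preempt-safe accessors look roughly like this (sketch, illustrative per-CPU variable):

	struct foo *p;

	p = get_cpu_var(foo_ptr);	/* implicitly disables preemption */
	/* ... p is usable: this task cannot migrate here ... */
	put_cpu_var(foo_ptr);		/* re-enables preemption */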
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
/* cascade all the timers from tv up one level */
- struct list_head *head, *curr;
+ struct timer_list *timer, *tmp;
+ struct list_head tv_list;
+
+ list_replace_init(tv->vec + index, &tv_list);
- head = tv->vec + index;
- curr = head->next;
/*
- * We are removing _all_ timers from the list, so we don't have to
- * detach them individually, just clear the list afterwards.
+ * We are removing _all_ timers from the list, so we
+ * don't have to detach them individually.
*/
- while (curr != head) {
- struct timer_list *tmp;
-
- tmp = list_entry(curr, struct timer_list, entry);
- BUG_ON(tmp->base != base);
- curr = curr->next;
- internal_add_timer(base, tmp);
+ list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
+ BUG_ON(timer->base != base);
+ internal_add_timer(base, timer);
}
- INIT_LIST_HEAD(head);
return index;
}
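The rewrite detaches the whole bucket in O(1) with list_replace_init() and then walks it with list_for_each_entry_safe(), which tolerates internal_add_timer() relinking the entry just visited. For reference, list_replace_init() is essentially the following (paraphrased from <linux/list.h>):

	static inline void list_replace(struct list_head *old,
					struct list_head *new)
	{
		new->next = old->next;
		new->next->prev = new;
		new->prev = old->prev;
		new->prev->next = new;
	}

	static inline void list_replace_init(struct list_head *old,
					     struct list_head *new)
	{
		list_replace(old, new);
		INIT_LIST_HEAD(old);
	}

Because list_replace() overwrites the destination head unconditionally, tv_list needs no initialiser of its own; the same reasoning lets work_list lose its LIST_HEAD_INIT in the __run_timers() hunk below.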
spin_lock_irq(&base->lock);
while (time_after_eq(jiffies, base->timer_jiffies)) {
- struct list_head work_list = LIST_HEAD_INIT(work_list);
+ struct list_head work_list;
struct list_head *head = &work_list;
int index = base->timer_jiffies & TVR_MASK;
-
+
/*
* Cascade timers:
*/
(!cascade(base, &base->tv3, INDEX(1))) &&
!cascade(base, &base->tv4, INDEX(2)))
cascade(base, &base->tv5, INDEX(3));
- ++base->timer_jiffies;
- list_splice_init(base->tv1.vec + index, &work_list);
+ ++base->timer_jiffies;
+ list_replace_init(base->tv1.vec + index, &work_list);
while (!list_empty(head)) {
void (*fn)(unsigned long);
unsigned long data;
}
spin_unlock(&base->lock);
+ /*
+ * It can happen that other CPUs service timer IRQs and increment
+ * jiffies, but we have not yet got a local timer tick to process
+ * the timer wheels. In that case, the expiry time can be before
+ * jiffies, but since the high-resolution timer here is relative to
+ * jiffies, the default expression when high-resolution timers are
+ * not active,
+ *
+ * time_before(MAX_JIFFY_OFFSET + jiffies, expires)
+ *
+ * would falsely evaluate to true. If that is the case, just
+ * return jiffies so that we can immediately fire the local timer.
+ */
+ if (time_before(expires, jiffies))
+ return jiffies;
+
if (time_before(hr_expires, expires))
return hr_expires;
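The jiffies helpers compare via signed subtraction, so they stay correct across counter wrap-around as long as the two values are within LONG_MAX/2 of each other. Stripped of their typecheck() wrappers, they are essentially (paraphrased from <linux/jiffies.h>):

	#define time_after(a,b)		((long)(b) - (long)(a) < 0)
	#define time_before(a,b)	time_after(b,a)
	#define time_after_eq(a,b)	((long)(a) - (long)(b) >= 0)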
return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
}
+/* XXX - all of this timekeeping code should later be moved to time.c */
+#include <linux/clocksource.h>
+static struct clocksource *clock; /* pointer to current clocksource */
+static cycle_t last_clock_cycle; /* cycle value at last update_wall_time */
/*
- * Using a loop looks inefficient, but "ticks" is
- * usually just one (we shouldn't be losing ticks,
- * we're doing this this way mainly for interrupt
- * latency reasons, not because we think we'll
- * have lots of lost timer ticks
+ * timekeeping_init - Initializes the clocksource and common timekeeping values
*/
-static void update_wall_time(unsigned long ticks)
+void __init timekeeping_init(void)
{
- do {
- ticks--;
+ unsigned long flags;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+ clock = get_next_clocksource();
+ calculate_clocksource_interval(clock, tick_nsec);
+ last_clock_cycle = read_clocksource(clock);
+ ntp_clear();
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+}
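timekeeping_init() takes the write side of xtime_lock; readers of the timekeeping state use the retry-based seqlock pattern rather than blocking. A sketch of the standard reader side:

	unsigned long seq;
	struct timespec ts;

	do {
		seq = read_seqbegin(&xtime_lock);
		ts = xtime;		/* snapshot the protected data */
	} while (read_seqretry(&xtime_lock, seq));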
+
+
+/*
+ * timekeeping_resume - Resumes the generic timekeeping subsystem.
+ * @dev: unused
+ *
+ * This is for the generic clocksource timekeeping.
+ * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
+ * still managed by arch specific suspend/resume code.
+ */
+static int timekeeping_resume(struct sys_device *dev)
+{
+ unsigned long flags;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+	/* re-read the clocksource: time spent suspended must not be counted */
+ last_clock_cycle = read_clocksource(clock);
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ return 0;
+}
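Re-reading the clocksource on resume re-bases last_clock_cycle at the counter's current value, so the interval spent suspended is not fed through update_wall_time() as elapsed cycles; as the function comment notes, xtime itself is corrected by the architecture's suspend/resume code.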
+
+/* sysfs resume/suspend bits for timekeeping */
+static struct sysdev_class timekeeping_sysclass = {
+ .resume = timekeeping_resume,
+ set_kset_name("timekeeping"),
+};
+
+static struct sys_device device_timer = {
+ .id = 0,
+ .cls = &timekeeping_sysclass,
+};
+
+static int __init timekeeping_init_device(void)
+{
+ int error = sysdev_class_register(&timekeeping_sysclass);
+ if (!error)
+ error = sysdev_register(&device_timer);
+ return error;
+}
+
+device_initcall(timekeeping_init_device);
+
+/*
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ * Called from the timer interrupt, must hold a write on xtime_lock.
+ */
+static void update_wall_time(void)
+{
+ cycle_t now, offset;
+
+ now = read_clocksource(clock);
+	offset = (now - last_clock_cycle) & clock->mask;
+
+	/*
+	 * Normally this loop runs just once; in the case of lost or late
+	 * ticks, however, it accumulates each missed interval correctly.
+	 */
+ while (offset > clock->interval_cycles) {
+ /* accumulate one interval */
+ last_clock_cycle += clock->interval_cycles;
+ offset -= clock->interval_cycles;
+
update_wall_time_one_tick();
if (xtime.tv_nsec >= 1000000000) {
xtime.tv_nsec -= 1000000000;
xtime.tv_sec++;
second_overflow();
}
- } while (ticks);
+ }
}
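The masked subtraction is what lets a clocksource narrower than 64 bits wrap without losing time. A worked example (not from the patch) with a 16-bit counter:

	cycle_t last = 0xfff0, now = 0x0010, mask = 0xffff;
	cycle_t offset;

	/* the counter wrapped between the two reads */
	offset = (now - last) & mask;	/* 0x0020: 0x10 cycles before the
					 * wrap plus 0x10 cycles after it */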
/*
unsigned long ticks;
ticks = jiffies - wall_jiffies;
- if (ticks) {
- wall_jiffies += ticks;
- update_wall_time(ticks);
- }
+ wall_jiffies += ticks;
+ update_wall_time();
calc_load(ticks);
}
{
int j;
tvec_base_t *base;
+ static char __devinitdata tvec_base_done[NR_CPUS];
- base = per_cpu(tvec_bases, cpu);
- if (!base) {
+ if (!tvec_base_done[cpu]) {
static char boot_done;
- /*
- * Cannot do allocation in init_timers as that runs before the
- * allocator initializes (and would waste memory if there are
- * more possible CPUs than will ever be installed/brought up).
- */
if (boot_done) {
+		/*
+		 * The APs (secondary CPUs) use this path later in boot,
+		 * once the memory allocators are up.
+		 */
base = kmalloc_node(sizeof(*base), GFP_KERNEL,
cpu_to_node(cpu));
if (!base)
return -ENOMEM;
memset(base, 0, sizeof(*base));
+ per_cpu(tvec_bases, cpu) = base;
} else {
- base = &boot_tvec_bases;
+ /*
+ * This is for the boot CPU - we use compile-time
+ * static initialisation because per-cpu memory isn't
+ * ready yet and because the memory allocators are not
+ * initialised either.
+ */
boot_done = 1;
+ base = &boot_tvec_bases;
}
- per_cpu(tvec_bases, cpu) = base;
+ tvec_base_done[cpu] = 1;
+ } else {
+ base = per_cpu(tvec_bases, cpu);
}
+
spin_lock_init(&base->lock);
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
}
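The restructured init_timers_cpu() now distinguishes three cases: the boot CPU, which must use compile-time storage because neither the per-cpu areas nor the memory allocators are up yet; secondary CPUs on their first boot, which kmalloc_node() a base on their home node; and CPUs returning from hotplug, which reuse the base already recorded via per_cpu(). The skeleton of the bootstrap pattern (a sketch with illustrative names):

	static struct foo boot_foo;	/* compile-time storage for the boot CPU */

	static struct foo *get_base(int cpu)
	{
		static int boot_done;

		if (boot_done)
			return kmalloc_node(sizeof(struct foo), GFP_KERNEL,
					    cpu_to_node(cpu));
		boot_done = 1;
		return &boot_foo;	/* allocators not initialised yet */
	}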
#endif /* CONFIG_HOTPLUG_CPU */
-static int __devinit timer_cpu_notify(struct notifier_block *self,
+static int timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
return NOTIFY_OK;
}
-static struct notifier_block __devinitdata timers_nb = {
+static struct notifier_block timers_nb = {
.notifier_call = timer_cpu_notify,
};
*/
if (jiffies % INTERPOLATOR_ADJUST == 0)
{
- if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
+ if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
time_interpolator->nsec_per_cyc--;
if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
time_interpolator->nsec_per_cyc++;
unsigned long flags;
/* Sanity check */
- if (ti->frequency == 0 || ti->mask == 0)
- BUG();
+ BUG_ON(ti->frequency == 0 || ti->mask == 0);
ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
spin_lock(&time_interpolator_lock);