/*
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Fixes:
 * Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 * Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 * Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
 * Pavel Machek and
 * Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */
#include <asm/apic.h>

#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/percpu.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kernel_stat.h>
#include <linux/kdebug.h>
#include <linux/smp.h>

#include <asm/i8259.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/timer.h>

#include <asm/mce.h>

#include <mach_traps.h>
int unknown_nmi_panic;
int nmi_watchdog_enabled;

static cpumask_t backtrace_mask = CPU_MASK_NONE;

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
EXPORT_SYMBOL(nmi_active);

unsigned int nmi_watchdog = NMI_DEFAULT;
EXPORT_SYMBOL(nmi_watchdog);

static int panic_on_timeout;

static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;
static inline unsigned int get_nmi_count(int cpu)
{
#ifdef CONFIG_X86_64
	return cpu_pda(cpu)->__nmi_count;
#else
	return nmi_count(cpu);
#endif
}

static inline int mce_in_progress(void)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
	return atomic_read(&mce_entry) > 0;
#endif
	return 0;
}
/*
 * Take the local apic timer and PIT/HPET into account. We don't
 * know which one is active when highres/dyntick is on.
 */
static inline unsigned int get_timer_irqs(int cpu)
{
#ifdef CONFIG_X86_64
	return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
#else
	return per_cpu(irq_stat, cpu).apic_timer_irqs +
		per_cpu(irq_stat, cpu).irq0_irqs;
#endif
}
/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	nmi_watchdog = NMI_NONE;
}
#ifdef CONFIG_SMP
/*
 * The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/*
	 * Intentionally don't use cpu_relax here. This is
	 * to make sure that the performance counter really ticks,
	 * even if there is a simulator or similar that catches the
	 * pause instruction. On a real HT machine this is fine because
	 * all other CPUs are busy with "useless" delay loops and don't
	 * care if they get somewhat fewer cycles.
	 */
	while (endflag == 0)
		mb();
}
#endif
int __init check_nmi_watchdog(void)
{
	unsigned int *prev_nmi_count;
	int cpu;

	if (nmi_watchdog == NMI_NONE || nmi_watchdog == NMI_DISABLED)
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
	if (!prev_nmi_count)
		goto error;

	printk(KERN_INFO "Testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = get_nmi_count(cpu);
	local_irq_enable();
	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
				"appears to be stuck (%d->%d)!\n",
				cpu,
				prev_nmi_count[cpu],
				get_nmi_count(cpu));
			per_cpu(wd_enabled, cpu) = 0;
			atomic_dec(&nmi_active);
		}
	}
	endflag = 1;
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		atomic_set(&nmi_active, -1);
		goto error;
	}
	printk("OK.\n");

	/*
	 * now that we know it works we can reduce NMI frequency to
	 * something more reasonable; makes a difference in some configs
	 */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = lapic_adjust_nmi_hz(1);

	kfree(prev_nmi_count);
	return 0;
error:
	if (nmi_watchdog == NMI_IO_APIC && !timer_through_8259)
		disable_8259A_irq(0);
	return -1;
}
static int __init setup_nmi_watchdog(char *str)
{
	unsigned int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if (nmi >= NMI_INVALID || nmi < NMI_NONE)
		return 0;

	nmi_watchdog = nmi;
	return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
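/*
 * Usage sketch (added for illustration, not in the original source):
 * booting with "nmi_watchdog=panic,2" sets panic_on_timeout and then
 * selects mode 2 (NMI_LOCAL_APIC); "nmi_watchdog=1" selects the IO-APIC
 * mode and "nmi_watchdog=0" disables the watchdog.
 */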
/*
 * Suspend/resume support
 */
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */
static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}
static struct sysdev_class nmi_sysclass = {
	.name		= "lapic_nmi",
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};
static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/*
	 * should really be a BUG_ON, but because this is an
	 * init call it just doesn't work. -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}

/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);
#endif	/* CONFIG_PM */

static void __acpi_nmi_enable(void *__unused)
{
	apic_write_around(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}

static void __acpi_nmi_disable(void *__unused)
{
	apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}
void setup_apic_nmi_watchdog(void *unused)
{
	if (__get_cpu_var(wd_enabled))
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active, neither should the other cpus be */
	if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
		return;

	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		/* enable it before init to avoid a race with the handler */
		__get_cpu_var(wd_enabled) = 1;
		if (lapic_watchdog_init(nmi_hz) < 0) {
			__get_cpu_var(wd_enabled) = 0;
			return;
		}
		/* FALL THROUGH */
	case NMI_IO_APIC:
		__get_cpu_var(wd_enabled) = 1;
		atomic_inc(&nmi_active);
	}
}
void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if (nmi_watchdog != NMI_LOCAL_APIC &&
	    nmi_watchdog != NMI_IO_APIC)
		return;
	if (__get_cpu_var(wd_enabled) == 0)
		return;
	if (nmi_watchdog == NMI_LOCAL_APIC)
		lapic_watchdog_stop();
	__get_cpu_var(wd_enabled) = 0;
	atomic_dec(&nmi_active);
}
/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * Since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [when there are more tty-related locks, break them up here too!]
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
{
	if (nmi_watchdog == NMI_LOCAL_APIC ||
	    nmi_watchdog == NMI_IO_APIC) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu) {
			if (per_cpu(nmi_touch, cpu) != 1)
				per_cpu(nmi_touch, cpu) = 1;
		}
	}

	/*
	 * Tickle the softlockup detector too:
	 */
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
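/*
 * Usage sketch (added for illustration, not in the original source): code
 * that legitimately stalls with interrupts off, e.g. waiting on slow
 * firmware, should tickle the watchdog from its wait loop so the stall is
 * not reported as a lockup. "my_device_busy()" is a made-up helper:
 *
 *	while (my_device_busy())
 *		touch_nmi_watchdog();
 */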
notrace __kprobes int
nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = get_timer_irqs(cpu);

	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

	/* Could check oops_in_progress here too, but it's safer not to */
	if (mce_in_progress())
		touched = 1;

	/* if none of the timers is firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/*
		 * We don't know how to accurately check for this.
		 * Just assume it was a watchdog timer interrupt;
		 * this matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}
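/*
 * Note (added for clarity, not in the original source): the tick above
 * runs nmi_hz times per second on each CPU, so alert_counter reaching
 * 5 * nmi_hz means the timer-interrupt sum has not changed for roughly
 * five seconds before die_nmi() is invoked.
 */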
#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1); /* Always panic here */
	return 0;
}
/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
		printk(KERN_WARNING
			"NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

#ifdef CONFIG_X86_32
	if (nmi_watchdog == NMI_NONE) {
		if (lapic_watchdog_ok())
			nmi_watchdog = NMI_LOCAL_APIC;
		else
			nmi_watchdog = NMI_IO_APIC;
	}
#endif

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}
#endif /* CONFIG_SYSCTL */
int do_nmi_callback(struct pt_regs *regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}
void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}
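/*
 * Note (added for clarity, not in the original source): setting a CPU's
 * bit in backtrace_mask makes that CPU print its stack from
 * nmi_watchdog_tick() on its next watchdog NMI and then clear the bit;
 * the loop above just waits for every online CPU to have done so. This
 * is typically reached via the trigger_all_cpu_backtrace() wrapper
 * (e.g. from SysRq).
 */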