/* local apic based NMI watchdog for various CPUs.
   This file also handles reservation of performance counters for coordination
   with other users (like oprofile).

   Note that these events normally don't tick when the CPU idles. This means
   the frequency varies with CPU load.

   Original code for K7/P6 written by Keith Owens */

#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/apic.h>
#include <asm/intel_arch_perfmon.h>

struct nmi_watchdog_ctlblk {
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};

/* Interface defining a CPU specific perfctr watchdog */
struct wd_ops {
	int (*reserve)(void);
	void (*unreserve)(void);
	int (*setup)(unsigned nmi_hz);
	void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
	void (*stop)(void);
	unsigned perfctr;
	unsigned evntsel;
	u64 checkbit;
};

static const struct wd_ops *wd_ops;

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
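/* For reference, with the usual i386 MSR definitions (quoted here as an
 * assumption): MSR_P4_CRU_ESCR5 (0x3e2) - MSR_P4_BSU_ESCR0 (0x3a0)
 * = 0x42 = 66. */
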
/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/ event selection may be reserved for
 *   different subsystems; this reservation system just tries to coordinate
 *   things a little
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_PERFCTR0);

		switch (boot_cpu_data.x86) {
		case 6:
			return (msr - MSR_P6_PERFCTR0);
		case 15:
			return (msr - MSR_P4_BPU_PERFCTR0);
		}
	}
	return 0;
}

/* converts an msr to an appropriate reservation bit
 * (returns the bit offset of the event selection register) */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);

		switch (boot_cpu_data.x86) {
		case 6:
			return (msr - MSR_P6_EVNTSEL0);
		case 15:
			return (msr - MSR_P4_BSU_ESCR0);
		}
	}
	return 0;
}

/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, perfctr_nmi_owner));
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, perfctr_nmi_owner));
}

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, perfctr_nmi_owner);
}

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, evntsel_nmi_owner);
}

EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
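/*
 * Hypothetical usage sketch for another perfctr user such as oprofile
 * (illustration only, not actual oprofile code):
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *		return -EBUSY;
 *	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *		release_perfctr_nmi(MSR_K7_PERFCTR0);
 *		return -EBUSY;
 *	}
 *	... program the counter/event selector freely ...
 *	release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *	release_perfctr_nmi(MSR_K7_PERFCTR0);
 */
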
void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	if (wd_ops)
		wd_ops->unreserve();

	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (!wd_ops)
		return;
	if (!wd_ops->reserve()) {
		printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
		return;
	}

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}

/*
 * Activate the NMI watchdog via the local APIC.
 */

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
	u64 counter_val;
	unsigned int retval = hz;

	/*
	 * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
	 * are writable, with higher bits sign extending from bit 31.
	 * So we can only program the counter with 31 bit values: bit 31
	 * of the written value must be 1 so that bits 32 and up sign
	 * extend to 1. Find an nmi_hz whose period fits.
	 */
	counter_val = (u64)cpu_khz * 1000;
	do_div(counter_val, retval);
	if (counter_val > 0x7fffffffULL) {
		u64 count = (u64)cpu_khz * 1000;
		do_div(count, 0x7fffffffUL);
		retval = count + 1;
	}
	return retval;
}
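/*
 * Worked example with made-up numbers: cpu_khz == 3000000 (3 GHz) and
 * hz == 1 gives a period of 3e9 cycles, which exceeds 0x7fffffff
 * (~2.147e9); retval then becomes 3e9 / 0x7fffffff + 1 == 2, i.e. the
 * watchdog ticks at 2 Hz instead of the requested 1 Hz.
 */
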
static void
write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi_hz)
{
	u64 count = (u64)cpu_khz * 1000;

	do_div(count, nmi_hz);
	if (descr)
		Dprintk("setting %s to -0x%08Lx\n", descr, count);
	wrmsrl(perfctr_msr, 0 - count);
}

static void write_watchdog_counter32(unsigned int perfctr_msr,
			const char *descr, unsigned nmi_hz)
{
	u64 count = (u64)cpu_khz * 1000;

	do_div(count, nmi_hz);
	if (descr)
		Dprintk("setting %s to -0x%08Lx\n", descr, count);
	wrmsr(perfctr_msr, (u32)(-count), 0);
}
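/* Presumably the reason for the split: unlike write_watchdog_counter(),
 * only the low 32 bits are written here, because on P6/ARCH_PERFMON the
 * counter sign-extends from bit 31 (see adjust_for_32bit_ctr() above)
 * and the upper half is not directly writable. */
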
/* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface
   nicely stable so there is not much variety */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

static int setup_k7_watchdog(unsigned nmi_hz)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = wd_ops->perfctr;
	evntsel_msr = wd_ops->evntsel;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	return 1;
}

static void single_msr_stop_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);
}

static int single_msr_reserve(void)
{
	if (!reserve_perfctr_nmi(wd_ops->perfctr))
		return 0;

	if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
		release_perfctr_nmi(wd_ops->perfctr);
		return 0;
	}
	return 1;
}

static void single_msr_unreserve(void)
{
	release_evntsel_nmi(wd_ops->evntsel);
	release_perfctr_nmi(wd_ops->perfctr);
}

static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
	/* start the cycle over again */
	write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}

static const struct wd_ops k7_wd_ops = {
	.reserve = single_msr_reserve,
	.unreserve = single_msr_unreserve,
	.setup = setup_k7_watchdog,
	.rearm = single_msr_rearm,
	.stop = single_msr_stop_watchdog,
	.perfctr = MSR_K7_PERFCTR0,
	.evntsel = MSR_K7_EVNTSEL0,
	.checkbit = 1ULL<<47,
};
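/* K7/K8 performance counters are 48 bits wide; the checkbit above is the
 * top bit, which lapic_wd_event() tests to see whether the counter has
 * overflowed yet. */
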
/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */

#define P6_EVNTSEL0_ENABLE	(1 << 22)
#define P6_EVNTSEL_INT		(1 << 20)
#define P6_EVNTSEL_OS		(1 << 17)
#define P6_EVNTSEL_USR		(1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED

static int setup_p6_watchdog(unsigned nmi_hz)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = wd_ops->perfctr;
	evntsel_msr = wd_ops->evntsel;

	/* KVM doesn't implement this MSR */
	if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
		return 0;

	evntsel = P6_EVNTSEL_INT
		| P6_EVNTSEL_OS
		| P6_EVNTSEL_USR
		| P6_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= P6_EVNTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	return 1;
}

static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
	/* The P6-based Pentium M needs to re-unmask
	 * the apic vector, but it doesn't hurt
	 * other P6 variants.
	 * ArchPerfmon/Core Duo also needs this */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/* P6/ARCH_PERFMON has 32 bit counter write */
	write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
}

static const struct wd_ops p6_wd_ops = {
	.reserve = single_msr_reserve,
	.unreserve = single_msr_unreserve,
	.setup = setup_p6_watchdog,
	.rearm = p6_rearm,
	.stop = single_msr_stop_watchdog,
	.perfctr = MSR_P6_PERFCTR0,
	.evntsel = MSR_P6_EVNTSEL0,
	.checkbit = 1ULL<<39,
};
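/* P6 family counters are 40 bits wide, hence checkbit is bit 39. The
 * effective counter width is what matters here, not the 32-bit write
 * interface handled by write_watchdog_counter32(). */
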
/* Intel P4 performance counters. By far the most complicated of all. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)

/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
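/* In other words, per the SDM section cited above: COMPARE enables
 * threshold filtering, THRESHOLD(15) programs the maximum threshold, and
 * COMPLEMENT inverts the test to "event count <= threshold", which is
 * always true -- so IQ_COUNTER0 increments on every clock. */
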
static int setup_p4_watchdog(unsigned nmi_hz)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);
	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	return 1;
}

static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);
}

static int p4_reserve(void)
{
	if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
		return 0;
#ifdef CONFIG_SMP
	if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
		goto fail1;
#endif
	if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
		goto fail2;
	/* RED-PEN why is ESCR1 not reserved here? */
	return 1;
 fail2:
#ifdef CONFIG_SMP
	if (smp_num_siblings > 1)
		release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
 fail1:
#endif
	release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
	return 0;
}

static void p4_unreserve(void)
{
#ifdef CONFIG_SMP
	if (smp_num_siblings > 1)
		release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
#endif
	release_evntsel_nmi(MSR_P4_CRU_ESCR0);
	release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
}

static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
	u64 dummy;
	/*
	 * P4 quirks:
	 * - An overflown perfctr will assert its interrupt
	 *   until the OVF flag in its CCCR is cleared.
	 * - LVTPC is masked on interrupt and must be
	 *   unmasked by the LVTPC handler.
	 */
	rdmsrl(wd->cccr_msr, dummy);
	dummy &= ~P4_CCCR_OVF;
	wrmsrl(wd->cccr_msr, dummy);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/* start the cycle over again */
	write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}

static const struct wd_ops p4_wd_ops = {
	.reserve = p4_reserve,
	.unreserve = p4_unreserve,
	.setup = setup_p4_watchdog,
	.rearm = p4_rearm,
	.stop = stop_p4_watchdog,
	/* RED-PEN this is wrong for the other sibling */
	.perfctr = MSR_P4_BPU_PERFCTR0,
	.evntsel = MSR_P4_BSU_ESCR0,
	.checkbit = 1ULL<<39,
};

/* Watchdog using the Intel architected PerfMon. Used for Core2 and hopefully
   all future Intel CPUs. */

#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

static struct wd_ops intel_arch_wd_ops;

static int setup_intel_arch_watchdog(unsigned nmi_hz)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return 0;

	perfctr_msr = wd_ops->perfctr;
	evntsel_msr = wd_ops->evntsel;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
	return 1;
}
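/* Unlike the fixed K7/P6 counter widths, architected perfmon reports its
 * counter width through CPUID (eax.split.bit_width above), which is why
 * checkbit is filled in at setup time rather than in the static
 * initializer below. */
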
static struct wd_ops intel_arch_wd_ops __read_mostly = {
	.reserve = single_msr_reserve,
	.unreserve = single_msr_unreserve,
	.setup = setup_intel_arch_watchdog,
	.rearm = p6_rearm,
	.stop = single_msr_stop_watchdog,
	.perfctr = MSR_ARCH_PERFMON_PERFCTR1,
	.evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
};

static void probe_nmi_watchdog(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
		    boot_cpu_data.x86 != 16)
			return;
		wd_ops = &k7_wd_ops;
		break;
	case X86_VENDOR_INTEL:
		/* Work around Core Duo (Yonah) errata AE49 where perfctr1
		   doesn't have a working enable bit. */
		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
			intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
			intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
		}
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			wd_ops = &intel_arch_wd_ops;
			break;
		}
		switch (boot_cpu_data.x86) {
		case 6:
			if (boot_cpu_data.x86_model > 0xd)
				return;

			wd_ops = &p6_wd_ops;
			break;
		case 15:
			if (boot_cpu_data.x86_model > 0x4)
				return;

			wd_ops = &p4_wd_ops;
			break;
		}
		break;
	}
}

/* Interface to nmi.c */

int lapic_watchdog_init(unsigned nmi_hz)
{
	if (!wd_ops) {
		probe_nmi_watchdog();
		if (!wd_ops) {
			printk(KERN_INFO "NMI watchdog: CPU not supported\n");
			return -1;
		}

		if (!wd_ops->reserve()) {
			printk(KERN_ERR
				"NMI watchdog: cannot reserve perfctrs\n");
			return -1;
		}
	}

	if (!(wd_ops->setup(nmi_hz))) {
		printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
			raw_smp_processor_id());
		return -1;
	}

	return 0;
}

void lapic_watchdog_stop(void)
{
	if (wd_ops)
		wd_ops->stop();
}

unsigned lapic_adjust_nmi_hz(unsigned hz)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
	    wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
		hz = adjust_for_32bit_ctr(hz);
	return hz;
}

int lapic_wd_event(unsigned nmi_hz)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 ctr;
	rdmsrl(wd->perfctr_msr, ctr);
	if (ctr & wd_ops->checkbit) { /* perfctr still running? */
		return 0;
	}
	wd_ops->rearm(wd, nmi_hz);
	return 1;
}
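/* The counter is programmed with a negative value and counts up towards
 * overflow, so the top (check) bit stays set for the whole period: seeing
 * it still set means this NMI was not generated by the watchdog. */
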
int lapic_watchdog_ok(void)
{
	return wd_ops != NULL;
}