err.no Git - linux-2.6/commitdiff
Revert perfctr reservation to 2.6.21 state
author Andi Kleen <ak@suse.de>
Tue, 3 Jul 2007 23:38:13 +0000 (01:38 +0200)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Wed, 4 Jul 2007 01:11:35 +0000 (18:11 -0700)
With this change, perfctr reservation works again when the NMI watchdog is disabled.

Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Björn Steinbrink <B.Steinbrink@gmx.de>
Cc: Stephane Eranian <eranian@hpl.hp.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/i386/kernel/cpu/perfctr-watchdog.c

index f0b67630b90da11f16dc3864cc1f3f1d9108d6e9..4d26d514c56f97aa37d8977b280565682fd4a2aa 100644
@@ -55,14 +55,45 @@ static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
 /* converts an msr to an appropriate reservation bit */
 static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
 {
-       return wd_ops ? msr - wd_ops->perfctr : 0;
+       /* returns the bit offset of the performance counter register */
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               return (msr - MSR_K7_PERFCTR0);
+       case X86_VENDOR_INTEL:
+               if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+                       return (msr - MSR_ARCH_PERFMON_PERFCTR0);
+
+               switch (boot_cpu_data.x86) {
+               case 6:
+                       return (msr - MSR_P6_PERFCTR0);
+               case 15:
+                       return (msr - MSR_P4_BPU_PERFCTR0);
+               }
+       }
+       return 0;
 }
 
 /* converts an msr to an appropriate reservation bit */
 /* returns the bit offset of the event selection register */
 static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 {
-       return wd_ops ? msr - wd_ops->evntsel : 0;
+       /* returns the bit offset of the event selection register */
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               return (msr - MSR_K7_EVNTSEL0);
+       case X86_VENDOR_INTEL:
+               if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+                       return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
+
+               switch (boot_cpu_data.x86) {
+               case 6:
+                       return (msr - MSR_P6_EVNTSEL0);
+               case 15:
+                       return (msr - MSR_P4_BSU_ESCR0);
+               }
+       }
+       return 0;
+
 }
 
 /* checks for a bit availability (hack for oprofile) */
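The helpers restored above map each counter or event-select MSR to a fixed bit offset, and that offset indexes a reservation bitmask so subsystems (NMI watchdog, oprofile, perfctr) can coordinate ownership of the hardware counters. The following user-space sketch illustrates the idea only; the bitmask and helper names here are hypothetical and only the AMD K7 MSR constants mirror the kernel's actual values.

/* Illustrative sketch of the MSR-to-bit reservation scheme, not the
 * kernel code in this hunk.  Compile with any C compiler and run; the
 * second attempt to reserve the same counter is rejected.
 */
#include <stdio.h>

#define MSR_K7_PERFCTR0  0xc0010004   /* AMD K7: PERFCTR0..3 are contiguous */
#define MSR_K7_PERFCTR3  0xc0010007

static unsigned long perfctr_owner_mask;   /* one bit per counter (hypothetical) */

/* AMD K7 flavour of nmi_perfctr_msr_to_bit(): counters are contiguous,
 * so the bit offset is simply the distance from PERFCTR0. */
static unsigned int perfctr_msr_to_bit(unsigned int msr)
{
	return msr - MSR_K7_PERFCTR0;
}

/* returns 1 if the counter was free and is now reserved, 0 otherwise */
static int reserve_perfctr(unsigned int msr)
{
	unsigned int bit = perfctr_msr_to_bit(msr);

	if (perfctr_owner_mask & (1UL << bit))
		return 0;               /* already owned by another subsystem */
	perfctr_owner_mask |= 1UL << bit;
	return 1;
}

int main(void)
{
	printf("first reserve:  %d\n", reserve_perfctr(MSR_K7_PERFCTR0)); /* 1 */
	printf("second reserve: %d\n", reserve_perfctr(MSR_K7_PERFCTR0)); /* 0 */
	printf("other counter:  %d\n", reserve_perfctr(MSR_K7_PERFCTR3)); /* 1 */
	return 0;
}

The point of the revert is visible in the mapping step: instead of deriving the bit offset from wd_ops (which is NULL when the NMI watchdog is disabled, making every MSR collapse to bit 0), the vendor/family switch computes the offset unconditionally, so reservation keeps working for other perfctr users even without the watchdog.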