diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 1a3e1bb4d7580a3f64d15fb51066346585a9c060..cae9cabc3031f1e3a2a3d6f8085d20b557026ac3 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -4,8 +4,8 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
-#include <asm/mach_apic.h>
 
+#include <mach_apic.h>
 #include "cpu.h"
 
 /*
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
-#ifdef CONFIG_X86_LOCAL_APIC
-#define ENABLE_C1E_MASK         0x18000000
-#define CPUID_PROCESSOR_SIGNATURE       1
-#define CPUID_XFAM              0x0ff00000
-#define CPUID_XFAM_K8           0x00000000
-#define CPUID_XFAM_10H          0x00100000
-#define CPUID_XFAM_11H          0x00200000
-#define CPUID_XMOD              0x000f0000
-#define CPUID_XMOD_REV_F        0x00040000
-
-/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
-static __cpuinit int amd_apic_timer_broken(void)
-{
-       u32 lo, hi;
-       u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
-       switch (eax & CPUID_XFAM) {
-       case CPUID_XFAM_K8:
-               if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
-                       break;
-       case CPUID_XFAM_10H:
-       case CPUID_XFAM_11H:
-               rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
-               if (lo & ENABLE_C1E_MASK) {
-                       if (smp_processor_id() != boot_cpu_physical_apicid)
-                               printk(KERN_INFO "AMD C1E detected late. "
-                                      "        Force timer broadcast.\n");
-                       return 1;
-               }
-               break;
-       default:
-               /* err on the side of caution */
-               return 1;
-       }
-       return 0;
-}
-#endif
-
-int force_mwait __cpuinitdata;
-
 static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
        if (cpuid_eax(0x80000000) >= 0x80000007) {
                c->x86_power = cpuid_edx(0x80000007);
                if (c->x86_power & (1<<8))
-                       set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+                       set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
        }
 }
 
@@ -105,9 +66,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
        /*
         * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-        * DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+        * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
         */
-       clear_bit(0*32+31, c->x86_capability);
+       clear_cpu_cap(c, 0*32+31);
 
        r = get_model_name(c);
 
@@ -131,8 +92,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                        if (c->x86_model < 6) {
                                /* Based on AMD doc 20734R - June 2000 */
                                if (c->x86_model == 0) {
-                                       clear_bit(X86_FEATURE_APIC, c->x86_capability);
-                                       set_bit(X86_FEATURE_PGE, c->x86_capability);
+                                       clear_cpu_cap(c, X86_FEATURE_APIC);
+                                       set_cpu_cap(c, X86_FEATURE_PGE);
                                }
                                break;
                        }
@@ -208,7 +169,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                                /*  Set MTRR capability flag if appropriate */
                                if (c->x86_model == 13 || c->x86_model == 9 ||
                                   (c->x86_model == 8 && c->x86_mask >= 8))
-                                       set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
+                                       set_cpu_cap(c, X86_FEATURE_K6_MTRR);
                                break;
                        }
 
@@ -231,7 +192,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                                        rdmsr(MSR_K7_HWCR, l, h);
                                        l &= ~0x00008000;
                                        wrmsr(MSR_K7_HWCR, l, h);
-                                       set_bit(X86_FEATURE_XMM, c->x86_capability);
+                                       set_cpu_cap(c, X86_FEATURE_XMM);
                                }
                        }
 
@@ -256,14 +217,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
        /* Use K8 tuning for Fam10h and Fam11h */
        case 0x10:
        case 0x11:
-               set_bit(X86_FEATURE_K8, c->x86_capability);
+               set_cpu_cap(c, X86_FEATURE_K8);
                break;
        case 6:
-               set_bit(X86_FEATURE_K7, c->x86_capability);
+               set_cpu_cap(c, X86_FEATURE_K7);
                break;
        }
        if (c->x86 >= 6)
-               set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
+               set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
        display_cacheinfo(c);
 
@@ -297,17 +258,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                        num_cache_leaves = 3;
        }
 
-#ifdef CONFIG_X86_LOCAL_APIC
-       if (amd_apic_timer_broken())
-               local_apic_timer_disabled = 1;
-#endif
-
        /* K6s reports MCEs but don't actually have all the MSRs */
        if (c->x86 < 6)
-               clear_bit(X86_FEATURE_MCE, c->x86_capability);
+               clear_cpu_cap(c, X86_FEATURE_MCE);
 
        if (cpu_has_xmm2)
-               set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
+               set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 }
 
 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
@@ -343,10 +299,4 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
        .c_size_cache   = amd_size_cache,
 };
 
-int __init amd_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
-       return 0;
-}
-
 cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
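
For reference, the two conventions this diff converts to can be sketched as below. This is a best-effort reconstruction of the 2.6.25/2.6.26-era helpers (roughly include/asm-x86/cpufeature.h and arch/x86/kernel/cpu/cpu.h); the exact definitions are assumptions and should be checked against the tree at this commit rather than taken as authoritative.

/*
 * Assumed sketch, not copied from the tree at this commit.
 *
 * set_cpu_cap()/clear_cpu_cap() wrap the old open-coded bitops on
 * c->x86_capability, so callers stop indexing the capability bitmap
 * directly:
 */
#define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit)	clear_bit(bit, (unsigned long *)((c)->x86_capability))

/*
 * cpu_vendor_dev_register() replaces the removed amd_init_cpu() initcall:
 * instead of each vendor file writing its struct cpu_dev into cpu_devs[]
 * by hand, it emits a {vendor, cpu_dev} pair into a dedicated init section
 * that the common CPU setup code walks once at boot.
 */
struct cpu_vendor_dev {
	int vendor;
	struct cpu_dev *cpu_dev;
};

#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
	static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
	__attribute__((__section__(".x86cpuvendor.init"))) = \
	{ cpu_vendor_id, cpu_dev }

Assuming definitions along these lines, the trailing cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev) line above is enough to make amd_cpu_dev discoverable at boot with no per-vendor registration code left in this file.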