#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif
#include <asm/pda.h>

#include "cpu.h"
DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);

DEFINE_PER_CPU(struct desc_struct, cpu_gdt[GDT_ENTRIES]);

DEFINE_PER_CPU(struct i386_pda, _cpu_pda);
EXPORT_PER_CPU_SYMBOL(_cpu_pda);

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_fxsr __cpuinitdata;
static int disable_x86_serial_nr __cpuinitdata = 1;
static int disable_x86_sep __cpuinitdata;

struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};

extern int disable_pse;
static void __cpuinit default_init(struct cpuinfo_x86 * c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}
static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init	= default_init,
	.c_vendor = "Unknown",
};
static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
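	/*
	 * Size check: each of the three extended leaves fills all four
	 * 32-bit registers, so the brand string is 3 * 4 * 4 = 48 bytes;
	 * the terminator above lands in the 49th byte of x86_model_id.
	 */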
	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}
void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;
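	/*
	 * Leaf 0x80000006 packs the L2 description into ECX: bits 31-16
	 * hold the size in KB and bits 7-0 the line size in bytes. For
	 * example (illustrative value), an ECX of 0x02004140 decodes as
	 * a 512K L2 with 64-byte lines.
	 */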
	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}
/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				return;
			}
		}
	}
	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}
static int __init x86_fxsr_setup(char * s)
{
	/* Tell all the other CPU's to not use it... */
	disable_x86_fxsr = 1;

	/*
	 * ... and clear the bits early in the boot_cpu_data
	 * so that the bootup process doesn't try to do this
	 * until much later in the CPU setup
	 */
	clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
	clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);
static int __init x86_sep_setup(char * s)
{
	disable_x86_sep = 1;
	return 1;
}
__setup("nosep", x86_sep_setup);
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/* Save EFLAGS, read it into f2, toggle the flag in a copy, write
	   it back, then re-read EFLAGS into f1 to see whether the bit
	   stuck. */
	asm("pushfl ; pushfl ; popl %0 ; movl %0,%1 ; xorl %2,%0 ; "
	    "pushl %0 ; popfl ; pushfl ; popl %0 ; popfl"
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
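/*
 * EFLAGS.ID is bit 21. CPUs without CPUID (i386 and early i486) hold
 * the bit fixed, so a failed toggle above is a reliable "no CPUID"
 * signal.
 */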
void __init cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		c->x86_mask = tfms & 15;
		if (cap0 & (1<<19))
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
	}
}
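/*
 * Worked example for the decode above: a CPUID leaf 1 signature of
 * 0x000006fb yields family (0x6fb >> 8) & 15 = 6, model (0x6fb >> 4)
 * & 15 = 0xf (extended model bits 19:16 are 0), and stepping 0xb.
 * The extended family byte in bits 27:20 is only added when the base
 * family reads 0xf.
 */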
/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   The others are not touched to avoid unwanted side effects.

   WARNING: this function is only called on the BP.  Don't add code here
   that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;

	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c, 1);
}
static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
{
	u32 tfms, ebx, xlvl;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if (c->cpuid_level >= 0x00000001) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf)
				c->x86 += (tfms >> 20) & 0xff;
			if (c->x86 >= 0x6)
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			c->x86_mask = tfms & 15;
#ifdef CONFIG_X86_HT
			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
#else
			c->apicid = (ebx >> 24) & 0xFF;
#endif
			if (c->x86_capability[0] & (1<<19))
				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ((xlvl & 0xffff0000) == 0x80000000) {
			if (xlvl >= 0x80000001) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if (xlvl >= 0x80000004)
				get_model_name(c); /* Default name */
		}
	}

	early_intel_workaround(c);

#ifdef CONFIG_X86_HT
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
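/*
 * Capability-word map as used above (this kernel's layout): word 0 and
 * word 4 are EDX and ECX of leaf 0x00000001; word 1 and word 6 are EDX
 * and ECX of extended leaf 0x80000001. Bit 19 of word 0 is CLFLUSH,
 * whose presence validates the CLFLUSH line-size field in EBX[15:8]
 * (reported in 8-byte units, hence the "* 8").
 */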
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_clflush_size = 32;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}
	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if (tsc_disable)
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	/* SEP disabled? */
	if (disable_x86_sep)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);
	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");
	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	if (c == &boot_cpu_data)
		sysenter_setup();
	enable_sep_cpu();

	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
}
#ifdef CONFIG_X86_HT
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
					"siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
					       ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
	}
}
#endif
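/*
 * Worked example: on a package with 2 cores and 2 threads per core,
 * EBX[23:16] reports 4 logical CPUs. get_count_order(4) = 2 strips the
 * two sibling bits, so an initial APIC ID of 6 (0b0110) gives
 * phys_proc_id = 1; after dividing out x86_max_cores, index_msb = 1 and
 * core_bits = 1, so cpu_core_id = (6 >> 1) & 1 = 1.
 */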
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */
extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);

void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();
#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}
/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->xfs = __KERNEL_PDA;
	return regs;
}

/* Initial PDA used by boot CPU */
struct i386_pda boot_pda = {
	._pda = &boot_pda,
	.cpu_number = 0,
	.pcurrent = &init_task,
};
static inline void set_kernel_fs(void)
{
	/* Set %fs for this CPU's PDA.  Memory clobber is to create a
	   barrier with respect to any PDA operations, so the compiler
	   doesn't move any before here. */
	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_PDA) : "memory");
}
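/*
 * Once %fs holds __KERNEL_PDA, PDA fields are reachable via segment-
 * relative addressing; because the _pda field points back at the PDA
 * itself, a read of %fs:0 yields the current CPU's PDA pointer without
 * knowing its linear address.
 */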
/* Initialize the CPU's GDT and PDA.  This is either the boot CPU doing itself
   (still using cpu_gdt_table), or a CPU doing it for a secondary which
   will soon come up. */
__cpuinit void init_gdt(int cpu, struct task_struct *idle)
{
	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
	struct desc_struct *gdt = per_cpu(cpu_gdt, cpu);
	struct i386_pda *pda = &per_cpu(_cpu_pda, cpu);

	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
	cpu_gdt_descr->address = (unsigned long)gdt;
	cpu_gdt_descr->size = GDT_SIZE - 1;

	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
			(u32 *)&gdt[GDT_ENTRY_PDA].b,
			(unsigned long)pda, sizeof(*pda) - 1,
			0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
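	/*
	 * Access-byte decode for the PDA entry: 0x80 is the present bit,
	 * DESCTYPE_S marks a code/data (non-system) descriptor, and 0x2
	 * makes it a writable data segment. The final 0 leaves the limit
	 * byte-granular, which is all a sizeof(*pda) - 1 limit needs.
	 */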
	memset(pda, 0, sizeof(*pda));
	pda->_pda = pda;
	pda->cpu_number = cpu;
	pda->pcurrent = idle;
}
void __cpuinit cpu_set_gdt(int cpu)
{
	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);

	load_gdt(cpu_gdt_descr);
	set_kernel_fs();
}
/* Common CPU init for both boot and secondary CPUs */
static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
{
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}
	load_idt(&idt_descr);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	if (curr->mm)
		BUG();
	enter_lazy_tlb(&init_mm, curr);

	load_esp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %gs so a stale selector can't leak into user space. */
	asm volatile ("mov %0, %%gs" : : "r" (0));

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}
/* Entrypoint to initialize secondary CPU */
void __cpuinit secondary_cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;

	_cpu_init(cpu, curr);
}
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;

	/* Set up the real GDT and PDA, so we can transition from the
	   boot_gdt_table & boot_pda. */
	init_gdt(cpu, curr);
	cpu_set_gdt(cpu);

	_cpu_init(cpu, curr);
}
#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif