#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/linkage.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/numa.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/genapic.h>

#include "cpu.h"

/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout
 */

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
        [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
        [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
        [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
        [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
        [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
        [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

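/* Capability bits force-cleared on the command line ("clearcpuid=" and
 * friends); masked out of every CPU's feature words in identify_cpu(). */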
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
}

struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
        display_cacheinfo(c);
}

static struct cpu_dev __cpuinitdata default_cpu = {
        .c_init = default_init,
        .c_vendor = "Unknown",
};
static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

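/* Read the processor brand string from CPUID leaves 0x80000002-0x80000004
 * into c->x86_model_id; returns 0 if the extended leaves are not there. */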
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}

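/* Report L1/L2 cache and TLB sizes from the AMD-style extended CPUID
 * leaves 0x80000005/0x80000006 and record them in *c. */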
void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
                       "D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }
}

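/* Work out the hyper-threading sibling count and the physical package
 * and core IDs from CPUID leaf 1 and the APIC ID bit layout. */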
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {
                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of "
                               "siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                 ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }
#endif
}

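/* Match the CPUID vendor string against the registered cpu_dev entries
 * and point this_cpu at the matching vendor's callbacks. */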
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;
        int i;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (!cpu_devs[i])
                        continue;
                if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                    (cpu_devs[i]->c_ident[1] &&
                     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
                        c->x86_vendor = i;
                        this_cpu = cpu_devs[i];
                        return;
                }
        }

        printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
        printk(KERN_ERR "CPU: Your system may be unstable.\n");
        c->x86_vendor = X86_VENDOR_UNKNOWN;
}

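/* Print the list of CPU vendors this kernel was built with support for. */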
static void __init early_cpu_support_print(void)
{
        int i, j;
        struct cpu_dev *cpu_devx;

        printk("KERNEL supported cpus:\n");
        for (i = 0; i < X86_VENDOR_NUM; i++) {
                cpu_devx = cpu_devs[i];
                if (!cpu_devx)
                        continue;
                for (j = 0; j < 2; j++) {
                        if (!cpu_devx->c_ident[j])
                                continue;
                        printk("  %s %s\n", cpu_devx->c_vendor,
                               cpu_devx->c_ident[j]);
                }
        }
}

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

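/* Register every compiled-in vendor driver (placed in the
 * .x86cpuvendor.init section) and early-identify the boot CPU. */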
void __init early_cpu_init(void)
{
        struct cpu_vendor_dev *cvdev;

        for (cvdev = __x86cpuvendor_start;
             cvdev < __x86cpuvendor_end;
             cvdev++)
                cpu_devs[cvdev->vendor] = cvdev->cpu_dev;

        early_cpu_support_print();
        early_identify_cpu(&boot_cpu_data);
}

/* Do some early cpuid on the boot CPU to get the parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu()
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;

                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

        c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
        c->phys_proc_id = c->initial_apicid;
#endif

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to not confuse. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        c->extended_cpuid_level = cpuid_eax(0x80000000);
        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

        if (c->extended_cpuid_level >= 0x80000008) {
                u32 eax = cpuid_eax(0x80000008);

                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }

        if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
            cpu_devs[c->x86_vendor]->c_early_init)
                cpu_devs[c->x86_vendor]->c_early_init(c);

        validate_pat_support(c);

        /* An early_param may already have cleared this, but the CPUID read
         * above sets it again, so clear it once more if the APIC is off. */
        if (disable_apic)
                clear_cpu_cap(c, X86_FEATURE_APIC);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        early_identify_cpu(c);

        init_scattered_cpuid_features(c);

        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs. The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Clear all flags overridden by options */
        for (i = 0; i < NCAPINTS; i++)
                c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        select_idle_routine(c);

#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}

void __cpuinit identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
        mtrr_ap_init();
}

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);

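/* Print the model name and stepping of a CPU, continuing a log line
 * that the caller has already started (KERN_CONT). */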
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");
}

static __init int setup_disablecpuid(char *arg)
{
        int bit;

        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

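/* Per-CPU boot bookkeeping: which CPUs have completed cpu_init(), and the
 * array of per-CPU PDA pointers. */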
cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

struct x8664_pda **_cpu_pda __read_mostly;
EXPORT_SYMBOL(_cpu_pda);

struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;

unsigned long __supported_pte_mask __read_mostly = ~0UL;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

static int do_not_nx __cpuinitdata;

/* noexec=on|off
   Control non-executable mappings for 64bit processes (on is the default). */
static int __init nonx_setup(char *str)
{
        if (!str)
                return -EINVAL;
        if (!strncmp(str, "on", 2)) {
                __supported_pte_mask |= _PAGE_NX;
                do_not_nx = 0;
        } else if (!strncmp(str, "off", 3)) {
                do_not_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        }
        return 0;
}
early_param("noexec", nonx_setup);

int force_personality32;

/* noexec32=on|off
   Control non-executable heap for 32bit processes.
   To control the stack too use noexec=off

   on   PROT_READ does not imply PROT_EXEC for 32bit processes (default)
   off  PROT_READ implies PROT_EXEC */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);

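/* Set up the per-CPU PDA for @cpu, make it reachable through the GS base,
 * and fill in the fields that are needed early in boot. */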
void pda_init(int cpu)
{
        struct x8664_pda *pda = cpu_pda(cpu);

        /* Set up data that may be needed in __get_free_pages early */
        loadsegment(fs, 0);
        loadsegment(gs, 0);
        /* Memory clobbers used to order PDA accesses */
        mb();
        wrmsrl(MSR_GS_BASE, pda);
        mb();

        pda->cpunumber = cpu;
        pda->irqcount = -1;
        pda->kernelstack = (unsigned long)stack_thread_info() -
                           PDA_STACKOFFSET + THREAD_SIZE;
        pda->active_mm = &init_mm;
        pda->mmu_state = 0;

        if (cpu == 0) {
                /* others are initialized in smpboot.c */
                pda->pcurrent = &init_task;
                pda->irqstackptr = boot_cpu_stack;
        } else {
                pda->irqstackptr = (char *)
                        __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
                if (!pda->irqstackptr)
                        panic("cannot allocate irqstack for cpu %d", cpu);

                if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
                        pda->nodenumber = cpu_to_node(cpu);
        }

        pda->irqstackptr += IRQSTACKSIZE-64;
}

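/* Static IST exception stacks for the boot CPU; secondary CPUs allocate
 * theirs dynamically in cpu_init(). */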
char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
                           DEBUG_STKSZ] __page_aligned_bss;

extern asmlinkage void ignore_sysret(void);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
        /*
         * LSTAR and STAR live in a bit strange symbiosis.
         * They both write to the same internal register. STAR allows
         * setting CS/DS but only with a 32bit target. LSTAR sets the 64bit rip.
         */
        wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
        wrmsrl(MSR_LSTAR, system_call);
        wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
        syscall32_cpu_init();
#endif

        /* Flags to clear on syscall */
        wrmsrl(MSR_SYSCALL_MASK,
               X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}

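/* Drop _PAGE_NX from the supported PTE mask if the CPU lacks EFER.NX or
 * NX was disabled with "noexec=off". */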
void __cpuinit check_efer(void)
{
        unsigned long efer;

        rdmsrl(MSR_EFER, efer);
        if (!(efer & EFER_NX) || do_not_nx)
                __supported_pte_mask &= ~_PAGE_NX;
}

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init.
 */
void __cpuinit cpu_init(void)
{
        int cpu = stack_smp_processor_id();
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
        unsigned long v;
        char *estacks = NULL;
        struct task_struct *me;
        int i;

        /* CPU 0 is initialised in head64.c */
        if (cpu != 0)
                pda_init(cpu);
        else
                estacks = boot_exception_stacks;

        me = current;

        if (cpu_test_and_set(cpu, cpu_initialized))
                panic("CPU#%d already initialized!\n", cpu);

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */
        switch_to_new_gdt();
        load_idt((const struct desc_ptr *)&idt_descr);

        memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
        syscall_init();

        wrmsrl(MSR_FS_BASE, 0);
        wrmsrl(MSR_KERNEL_GS_BASE, 0);
        barrier();

        check_efer();

        /*
         * set up and load the per-CPU TSS
         */
        for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                static const unsigned int order[N_EXCEPTION_STACKS] = {
                        [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
                        [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
                };
                if (cpu) {
                        estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
                        if (!estacks)
                                panic("Cannot allocate exception stack %ld %d\n",
                                      v, cpu);
                }
                estacks += PAGE_SIZE << order[v];
                orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
        }

        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
        /*
         * <= is required because the CPU will access up to
         * 8 bits beyond the end of the IO permission bitmap.
         */
        for (i = 0; i <= IO_BITMAP_LONGS; i++)
                t->io_bitmap[i] = ~0UL;

        atomic_inc(&init_mm.mm_count);
        me->active_mm = &init_mm;
        if (me->mm)
                BUG();
        enter_lazy_tlb(&init_mm, me);

        load_sp0(t, &current->thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
        /*
         * If kgdb is connected no debug regs should be altered. This
         * is only applicable when KGDB and a KGDB I/O module are built
         * into the kernel and you are using early debugging with
         * kgdbwait. KGDB will control the kernel HW breakpoint registers.
         */
        if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
                arch_kgdb_ops.correct_hw_break();
        else {
#endif
        /*
         * Clear all 6 debug registers:
         */
        set_debugreg(0UL, 0);
        set_debugreg(0UL, 1);
        set_debugreg(0UL, 2);
        set_debugreg(0UL, 3);
        set_debugreg(0UL, 6);
        set_debugreg(0UL, 7);
#ifdef CONFIG_KGDB
        /* If kgdb is connected no debug regs should be altered. */
        }
#endif

        fpu_init();

        raw_local_save_flags(kernel_eflags);
}