/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file handles the architecture-dependent parts of initialization.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
unsigned int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);

struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
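/*
 * Note (not from the original source): these are the legacy PC motherboard
 * I/O port ranges. Requesting them in setup_arch() below marks them busy so
 * they show up in /proc/ioports and other drivers leave them alone.
 */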
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
struct resource data_resource = {
        .name = "Kernel data",
        .flags = IORESOURCE_RAM,
};
struct resource code_resource = {
        .name = "Kernel code",
        .flags = IORESOURCE_RAM,
};
struct resource bss_resource = {
        .name = "Kernel bss",
        .flags = IORESOURCE_RAM,
};
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

#ifdef CONFIG_PROC_VMCORE
/*
 * elfcorehdr= specifies the location of the ELF core header
 * stored by the crashed kernel. This option is passed to the
 * capture kernel by the kexec loader.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;
        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn) << PAGE_SHIFT;
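        /*
         * Illustrative sizing (not from the original source): the bootmem
         * bitmap needs one bit per page frame, so for end_pfn = 2^21
         * (8 GB of 4 KB pages) that is 2^21 bits = 256 KB, i.e. 64 pages.
         */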
        bootmap = find_e820_area(0, end_pfn << PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        reserve_bootmem(bootmap, bootmap_size);
}
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
        unsigned long long free_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

        ret = parse_crashkernel(boot_command_line, free_mem,
                        &crash_size, &crash_base);
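        /*
         * Illustrative note (not from the original source): the command line
         * typically carries something like "crashkernel=128M@16M", i.e. a
         * size plus an optional base address, which parse_crashkernel()
         * above splits into crash_size and crash_base.
         */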
        if (ret == 0 && crash_size) {
                if (crash_base > 0) {
                        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                                "for crashkernel (System RAM: %ldMB)\n",
                                (unsigned long)(crash_size >> 20),
                                (unsigned long)(crash_base >> 20),
                                (unsigned long)(free_mem >> 20));
                        crashk_res.start = crash_base;
                        crashk_res.end   = crash_base + crash_size - 1;
                        reserve_bootmem(crash_base, crash_size);
                } else
                        printk(KERN_INFO "crashkernel reservation failed - "
                                "you have to specify a base address\n");
        }
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
        /*
         * There is a real-mode segmented pointer pointing to the
         * 4K EBDA area at 0x40E.
         */
        ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
        ebda_addr <<= 4;
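        /*
         * Illustrative example (not from the original source): if the BDA
         * word at 0x40E reads 0x9FC0, that is a real-mode segment, so the
         * EBDA starts at physical 0x9FC0 << 4 = 0x9FC00.
         */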
        ebda_size = *(unsigned short *)__va(ebda_addr);

        /* Round EBDA up to pages */
        if (ebda_size == 0)
                ebda_size = 1;
        ebda_size <<= 10;
        ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
        if (ebda_size > 64*1024)
                ebda_size = 64*1024;
}
void __init setup_arch(char **cmdline_p)
{
        unsigned i;

        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
        setup_memory_region();

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;
        bss_resource.start = virt_to_phys(&__bss_start);
        bss_resource.end = virt_to_phys(&__bss_stop)-1;

        early_identify_cpu(&boot_cpu_data);

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;
        finish_e820_parsing();

        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        num_physpages = end_pfn;

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

        /* setup to use the static apicid table during kernel startup */
        x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;

        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif

        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT,
                                (table_end - table_start) << PAGE_SHIFT);

        reserve_bootmem_generic(__pa_symbol(&_text),
                                __pa_symbol(&_end) - __pa_symbol(&_text));

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);

        /* reserve ebda region */
        if (ebda_addr)
                reserve_bootmem_generic(ebda_addr, ebda_size);

        /* reserve nodemap region */
        if (nodemap_addr)
                reserve_bootmem_generic(nodemap_addr, nodemap_size);

        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
        if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
                unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
                unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
                unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

                if (ramdisk_end <= end_of_mem) {
                        reserve_bootmem_generic(ramdisk_image, ramdisk_size);
                        initrd_start = ramdisk_image + PAGE_OFFSET;
                        initrd_end = initrd_start+ramdisk_size;
                } else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               ramdisk_end, end_of_mem);
                        initrd_start = 0;
                }
        }
#endif
        reserve_crashkernel();

        /*
         * set this early, so we don't allocate cpu0
         * if the MADT list doesn't list the BSP first.
         * mpparse.c/MP_processor_info() allocates logical cpu numbers.
         */
        cpu_set(0, cpu_present_map);

        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();

        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
        e820_reserve_resources();
        e820_mark_nosave_regions();

        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);

#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
}
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
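        /*
         * Illustrative note (not from the original source): the three
         * extended leaves above return 16 bytes of brand string each,
         * 48 bytes in total, hence the NUL terminator written at index 48.
         */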
        c->x86_model_id[48] = 0;
        return 1;
}
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }

        if (n >= 0x80000007)
                cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
static int nearby_node(int apicid)
{
        int i;
        for (i = apicid - 1; i >= 0; i--) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores. Assumes number of cores is a power of two.
 */
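/*
 * Illustrative example (not from the original source): with two cores per
 * package, bits == 1, so an initial APIC id of 5 (binary 101) gives
 * cpu_core_id = 5 & 1 = 1 and phys_proc_id = 5 >> 1 = 2, i.e. core 1 in
 * physical package 2.
 */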
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
        unsigned bits;
        int node = 0;
        int cpu = smp_processor_id();
        unsigned apicid = hard_smp_processor_id();
        unsigned ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        c->phys_proc_id = phys_pkg_id(bits);

        node = c->phys_proc_id;
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /*
                 * Two possibilities here:
                 * - The CPU is missing memory and no node was created.
                 *   In that case try picking one from a nearby CPU.
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in.
                 *   Assume they are all increased by a constant offset,
                 *   but in the same order as the HT nodeids.
                 *   If that doesn't result in a usable node fall back to the
                 *   path for the previous case.
                 */
                int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
#define ENABLE_C1E_MASK		0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM		0x0ff00000
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000
#define CPUID_XMOD_REV_F	0x00040000
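/*
 * Illustrative decode (not from the original source): a CPUID signature of
 * eax = 0x00060fb1 has extended family (eax & CPUID_XFAM) == CPUID_XFAM_K8
 * and extended model (eax & CPUID_XMOD) == 0x00060000 >= CPUID_XMOD_REV_F,
 * i.e. a rev F or later K8, so the C1E MSR check below applies.
 */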
/* AMD systems with C1E don't have a working local APIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
        u32 lo, hi;
        u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
        switch (eax & CPUID_XFAM) {
        case CPUID_XFAM_K8:
                if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
                        break;
                /* fall through */
        case CPUID_XFAM_10H:
        case CPUID_XFAM_11H:
                rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
                if (lo & ENABLE_C1E_MASK)
                        return 1;
                break;
        default:
                /* err on the side of caution */
                return 1;
        }
        return 0;
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        unsigned level;
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, &c->x86_capability);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
        if (c->x86 == 0x10 || c->x86 == 0x11)
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

        level = get_model_name(c);
        if (!level) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish models here, but this is only
                           a fallback anyway. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008)
                amd_detect_cmp(c);

        if (c->extended_cpuid_level >= 0x80000006 &&
            (cpuid_edx(0x80000006) & 0xf000))
                num_cache_leaves = 4;
        else
                num_cache_leaves = 3;

        if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
                set_bit(X86_FEATURE_K8, &c->x86_capability);

        /* RDTSC can be speculated around */
        clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);

        /* Family 10 doesn't support C states in MWAIT so don't use it */
        if (c->x86 == 0x10 && !force_mwait)
                clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);

        if (amd_apic_timer_broken())
                disable_apic_timer = 1;
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {
                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
                               smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                 ((1 << core_bits) - 1);
        }
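        /*
         * Illustrative example (not from the original source): with 4 logical
         * CPUs per package (CPUID.1 EBX[23:16]) and 2 cores per package,
         * smp_num_siblings ends up as 2 threads per core. For an initial
         * APIC id of 5 (binary 101) that yields phys_proc_id = 5 >> 2 = 1
         * and cpu_core_id = (5 >> 1) & 1 = 0: thread 1 of core 0 in package 1.
         */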
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
        }
}
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, t;

        if (c->cpuid_level < 4)
                return 1;

        cpuid_count(4, 0, &eax, &t, &t, &t);

        if (eax & 0x1f)
                return (eax >> 26) + 1;
        else
                return 1;
}
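/*
 * Illustrative note (not from the original source): EAX[31:26] of CPUID
 * leaf 4 holds the maximum number of addressable core IDs per physical
 * package minus one, so e.g. (eax >> 26) == 3 reports 4 cores on the die.
 */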
static void srat_detect_node(void)
{
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE)
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        unsigned n;

        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
                        set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
        }

        if (cpu_has_ds) {
                unsigned int l1, l2;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1 << 11)))
                        set_bit(X86_FEATURE_BTS, c->x86_capability);
                if (!(l1 & (1 << 12)))
                        set_bit(X86_FEATURE_PEBS, c->x86_capability);
        }

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
        if (c->x86 == 6)
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
        if (c->x86 == 15)
                set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        else
                clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        c->x86_max_cores = intel_num_cpu_cores(c);
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}
struct cpu_model_info {
        char *model_names[16];
};
/*
 * Do some early cpuid on the boot CPU to get some parameters that are
 * needed before check_bugs. Everything advanced is in identify_cpu
 * below.
 */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);
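        /*
         * Illustrative note (not from the original source): CPUID leaf 0
         * returns the vendor string in EBX, EDX, ECX order, which is why the
         * destination offsets above are 0, 8 and 4 - reassembled they read
         * e.g. "AuthenticAMD" or "GenuineIntel".
         */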
        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 tfms, misc;

                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
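                /*
                 * Illustrative decode (not from the original source): for
                 * tfms = 0x00100f42 the base family field is 0xf, so the
                 * extended family 0x01 is added, giving c->x86 = 0x10
                 * (family 10h); the model becomes 0x4 and the stepping 0x2.
                 */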
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;
        u32 xlvl;

        early_identify_cpu(c);

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to not confuse. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        init_scattered_cpuid_features(c);

        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_INTEL:
                init_intel(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        select_idle_routine(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs. The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        if (c != &boot_cpu_data)
                mtrr_ap_init();

        numa_add_cpu(smp_processor_id());
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_x86 *c = v;
        int cpu = 0, i;

        /*
         * These flag bits must match the definitions in <asm/cpufeature.h>.
         * NULL means this bit is undefined or reserved; either way it doesn't
         * have meaning as far as Linux is concerned. Note that it's important
         * to realize there is a difference between this table and CPUID -- if
         * applications want to get the raw CPUID data, they should access
         * /dev/cpu/<cpu_nr>/cpuid instead.
         */
        static const char *const x86_cap_flags[] = {
                /* Intel-defined */
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
                "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

                /* AMD-defined */
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
                NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
                "3dnowext", "3dnow",

                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Other (Linux-defined) */
                "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
                NULL, NULL, NULL, NULL,
                "constant_tsc", "up", NULL, "arch_perfmon",
                "pebs", "bts", NULL, "sync_rdtsc",
                "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Intel-defined (#2) */
                "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
                "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
                NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* VIA/Cyrix/Centaur-defined */
                NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
                "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* AMD-defined (#2) */
                "lahf_lm", "cmp_legacy", "svm", "extapic",
                "cr8_legacy", "abm", "sse4a", "misalignsse",
                "3dnowprefetch", "osvw", "ibs", "sse5",
                "skinit", "wdt", NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Auxiliary (Linux-defined) */
                "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        };
        static const char *const x86_power_flags[] = {
                "ts",           /* temperature sensor */
                "fid",          /* frequency id control */
                "vid",          /* voltage id control */
                "ttp",          /* thermal trip */
                "tm",
                "stc",
                "100mhzsteps",
                "hwpstate",
                "",             /* tsc invariant mapped to constant_tsc */
                /* nothing */
        };
        seq_printf(m, "processor\t: %u\n"
                   "vendor_id\t: %s\n"
                   "cpu family\t: %d\n"
                   "model\t\t: %d\n"
                   "model name\t: %s\n",
                   (unsigned)cpu,
                   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                   c->x86,
                   (int)c->x86_model,
                   c->x86_model_id[0] ? c->x86_model_id : "unknown");

        if (c->x86_mask || c->cpuid_level >= 0)
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
        else
                seq_printf(m, "stepping\t: unknown\n");

        if (cpu_has(c, X86_FEATURE_TSC)) {
                unsigned int freq = cpufreq_quick_get((unsigned)cpu);
                if (!freq)
                        freq = cpu_khz;
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                           freq / 1000, (freq % 1000));
        }

        if (c->x86_cache_size >= 0)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

        if (smp_num_siblings * c->x86_max_cores > 1) {
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n",
                           cpus_weight(per_cpu(cpu_core_map, cpu)));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }

        seq_printf(m,
                   "fpu\t\t: yes\n"
                   "fpu_exception\t: yes\n"
                   "cpuid level\t: %d\n"
                   "wp\t\t: yes\n"
                   "flags\t\t:",
                   c->cpuid_level);

        for (i = 0; i < 32*NCAPINTS; i++)
                if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
                        seq_printf(m, " %s", x86_cap_flags[i]);

        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);
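        /*
         * Illustrative note (not from the original source): BogoMIPS is
         * loops_per_jiffy * HZ / 500000, so e.g. loops_per_jiffy = 2000000
         * with HZ = 250 prints "1000.00".
         */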
        if (c->x86_tlbsize > 0)
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
        seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
                   c->x86_phys_bits, c->x86_virt_bits);

        seq_printf(m, "power management:");
        for (i = 0; i < 32; i++)
                if (c->x86_power & (1 << i)) {
                        if (i < ARRAY_SIZE(x86_power_flags) &&
                            x86_power_flags[i])
                                seq_printf(m, "%s%s",
                                           x86_power_flags[i][0] ? " " : "",
                                           x86_power_flags[i]);
                        else
                                seq_printf(m, " [%d]", i);
                }

        seq_printf(m, "\n\n");

        return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
        if (*pos == 0)  /* just in case, cpu 0 is not the first */
                *pos = first_cpu(cpu_online_map);
        if ((*pos) < NR_CPUS && cpu_online(*pos))
                return &cpu_data(*pos);
        return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        *pos = next_cpu(*pos, cpu_online_map);
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};