/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  This file handles the architecture-dependent parts of initialization.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/vsyscall.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/topology.h>
#include <asm/trampoline.h>
#include <asm/mmconfig.h>
#include <mach_apic.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);

struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
static struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x60,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x64, .end = 0x64,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
static struct resource data_resource = {
        .name = "Kernel data",
        .flags = IORESOURCE_RAM,
};

static struct resource code_resource = {
        .name = "Kernel code",
        .flags = IORESOURCE_RAM,
};

static struct resource bss_resource = {
        .name = "Kernel bss",
        .flags = IORESOURCE_RAM,
};
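
/*
 * Note (added for clarity): the .start/.end fields of the kernel
 * code/data/bss resources above are deliberately left zero here;
 * setup_arch() below fills them in from virt_to_phys(&_text), &_etext,
 * &_edata and the bss boundaries before inserting them into
 * iomem_resource.
 */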
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

#ifdef CONFIG_PROC_VMCORE
/*
 * elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;

        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
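
/*
 * Illustrative note (not from the original source): a kexec loader such as
 * kexec-tools passes something like "elfcorehdr=0x02000000" on the capture
 * kernel's command line; memparse() above also accepts K/M/G suffixes, so
 * "elfcorehdr=32M" names the same address.
 */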
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        total_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

        ret = parse_crashkernel(boot_command_line, total_mem,
                        &crash_size, &crash_base);
        if (ret == 0 && crash_size) {
                if (crash_base <= 0) {
                        printk(KERN_INFO "crashkernel reservation failed - "
                                        "you have to specify a base address\n");
                        return;
                }

                if (reserve_bootmem(crash_base, crash_size,
                                        BOOTMEM_EXCLUSIVE) < 0) {
                        printk(KERN_INFO "crashkernel reservation failed - "
                                        "memory is in use\n");
                        return;
                }

                printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                                "for crashkernel (System RAM: %ldMB)\n",
                                (unsigned long)(crash_size >> 20),
                                (unsigned long)(crash_base >> 20),
                                (unsigned long)(total_mem >> 20));
                crashk_res.start = crash_base;
                crashk_res.end   = crash_base + crash_size - 1;
                insert_resource(&iomem_resource, &crashk_res);
        }
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
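
/*
 * Usage sketch (illustrative, not from the original source): the region is
 * requested on the boot command line as "crashkernel=size@offset", e.g.
 * "crashkernel=128M@16M" reserves 128 MB starting at physical 16 MB. As the
 * error path above shows, this code insists on an explicit base address.
 */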
/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) __init memory_setup(void)
{
        machine_specific_memory_setup();
}

static void __init parse_setup_data(void)
{
        struct setup_data *data;
        unsigned long pa_data;

        if (boot_params.hdr.version < 0x0209)
                return;
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_ioremap(pa_data, PAGE_SIZE);
                switch (data->type) {
                default:
                        break;
                }
#ifndef CONFIG_DEBUG_BOOT_PARAMS
                free_early(pa_data, pa_data+sizeof(*data)+data->len);
#endif
                pa_data = data->next;
                early_iounmap(data, PAGE_SIZE);
        }
}
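
/*
 * Note (added for clarity): with boot protocol 2.09 and later,
 * boot_params.hdr.setup_data holds the physical address of the first
 * struct setup_data node; each node carries a type, a length and a
 * payload, and its ->next field holds the physical address of the
 * following node, with 0 terminating the list walked above.
 */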
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */
void __init setup_arch(char **cmdline_p)
{
        unsigned i;

        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL64", 4))
                efi_enabled = 1;
#endif

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;
        bss_resource.start = virt_to_phys(&__bss_start);
        bss_resource.end = virt_to_phys(&__bss_stop)-1;

        early_identify_cpu(&boot_cpu_data);

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        if (init_ohci1394_dma_early)
                init_ohci1394_dma_on_all_controllers();
#endif

        finish_e820_parsing();

        /* after parse_early_param, so could debug it */
        insert_resource(&iomem_resource, &code_resource);
        insert_resource(&iomem_resource, &data_resource);
        insert_resource(&iomem_resource, &bss_resource);

        early_gart_iommu_check();

        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        /* update e820 for memory not covered by WB MTRRs */
        mtrr_bp_init();
        if (mtrr_trim_uncached_memory(end_pfn)) {
                e820_register_active_regions(0, 0, -1UL);
                end_pfn = e820_end_of_ram();
        }

        num_physpages = end_pfn;

        max_pfn_mapped = init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT));

#ifdef CONFIG_KVM_CLOCK
        kvmclock_init();
#endif

        /* setup to use the early static init tables during kernel startup */
        x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
        x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
#ifdef CONFIG_NUMA
        x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
#endif

        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;

        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif

        dma32_reserve_bootmem();

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif

        if (efi_enabled)
                efi_reserve_bootmem();

        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
        if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
                unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
                unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
                unsigned long end_of_mem    = end_pfn << PAGE_SHIFT;

                if (ramdisk_end <= end_of_mem) {
                        /*
                         * don't need to reserve again, already reserved early
                         * in x86_64_start_kernel, and early_res_to_bootmem
                         * converts that to reserved in bootmem
                         */
                        initrd_start = ramdisk_image + PAGE_OFFSET;
                        initrd_end = initrd_start+ramdisk_size;
                } else {
                        free_bootmem(ramdisk_image, ramdisk_size);
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               ramdisk_end, end_of_mem);
                        initrd_start = 0;
                }
        }
#endif
        reserve_crashkernel();

        reserve_ibft_region();

        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();
        ioapic_init_mappings();

        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
        e820_reserve_resources();
        e820_mark_nosave_regions();

        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);

#if defined(CONFIG_VGA_CONSOLE)
        if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif

        /* do this before identify_cpu for boot cpu */
        check_enable_amd_mmconf_dmi();
}
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}
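
/*
 * Note (added for clarity): CPUID leaves 0x80000002..0x80000004 each return
 * 16 bytes of the processor brand string in EAX:EBX:ECX:EDX, so the three
 * calls above fill the 48-byte x86_model_id buffer; the explicit NUL at
 * byte 48 guards against firmware that does not terminate the string.
 */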
void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
                       "D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
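
/*
 * Note (added for clarity): CPUID leaf 0x80000008 reports the physical
 * address width in bits 7:0 of EAX and the linear (virtual) address width
 * in bits 15:8; e.g. EAX = 0x3028 decodes above to x86_virt_bits = 48 and
 * x86_phys_bits = 0x28 = 40.
 */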
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {
                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of "
                               "siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                               ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }
#endif
}
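
/*
 * Worked example (illustrative, not from the original source): on a part
 * with 2 cores and 2 HT siblings per core, CPUID leaf 1 reports 4 logical
 * processors, so the first get_count_order() call above uses index_msb = 2
 * to derive phys_proc_id from the initial APIC ID. After dividing by
 * x86_max_cores, smp_num_siblings = 2, index_msb = 1 and core_bits = 1, so
 * cpu_core_id is the APIC ID shifted right by one and masked to a single
 * bit.
 */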
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else if (!strcmp(v, "CentaurHauls"))
                c->x86_vendor = X86_VENDOR_CENTAUR;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}
// FIXME: Needs to use cpu_vendor_dev_register
extern void __cpuinit early_init_amd(struct cpuinfo_x86 *c);
extern void __cpuinit init_amd(struct cpuinfo_x86 *c);
extern void __cpuinit early_init_intel(struct cpuinfo_x86 *c);
extern void __cpuinit init_intel(struct cpuinfo_x86 *c);
extern void __cpuinit early_init_centaur(struct cpuinfo_x86 *c);
extern void __cpuinit init_centaur(struct cpuinfo_x86 *c);
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0;	/* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

        c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
        c->phys_proc_id = c->initial_apicid;
#endif
        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to not confuse. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        c->extended_cpuid_level = cpuid_eax(0x80000000);
        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                early_init_amd(c);
                break;
        case X86_VENDOR_INTEL:
                early_init_intel(c);
                break;
        case X86_VENDOR_CENTAUR:
                early_init_centaur(c);
                break;
        }

        validate_pat_support(c);
}
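
/*
 * Worked example (illustrative, not from the original source): with
 * tfms = 0x000106a5 from CPUID leaf 1, the decode above gives a base
 * family of 0x6 and a base model of 0xa; since the family is >= 6, the
 * extended model bits ((tfms >> 16) & 0xf = 1) are prepended to yield
 * model 0x1a, and the stepping is 5. The CLFLUSH size byte from EBX is
 * counted in 8-byte units, hence the "* 8" to get bytes (a value of 8
 * means a 64-byte cache line).
 */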
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        early_identify_cpu(c);
        init_scattered_cpuid_features(c);
        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;
        case X86_VENDOR_INTEL:
                init_intel(c);
                break;
        case X86_VENDOR_CENTAUR:
                init_centaur(c);
                break;
        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Clear all flags overridden by options */
        for (i = 0; i < NCAPINTS; i++)
                c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        select_idle_routine(c);

#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}
void __cpuinit identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
        mtrr_ap_init();
}

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");
}
static __init int setup_disablecpuid(char *arg)
{
        int bit;

        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;
        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
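
/*
 * Usage sketch (illustrative, not from the original source): booting with
 * "clearcpuid=N" clears capability bit N (counted as word*32 + bit in the
 * x86_capability[] layout) by recording it in cleared_cpu_caps[], which
 * identify_cpu() above masks out; "noclflush" is the equivalent shortcut
 * for X86_FEATURE_CLFLSH.
 */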