/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
/* flag for disabling the tsc */
extern int tsc_disable;

struct desc_struct {
	unsigned long a,b;
};

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
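/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *	void *pc = current_text_addr();
 *	printk(KERN_DEBUG "executing near %p\n", pc);
 */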
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	char	hard_math;
	char	rfu;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;		/* in KB - valid for CPUs which support this
					   call */
	int	x86_cache_alignment;	/* In bytes */
	char	fdiv_bug;
	char	f00f_bug;
	char	coma_bug;
	char	pad0;
	int	x86_power;
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	unsigned char x86_max_cores;	/* cpuid returned max cores value */
	unsigned char apicid;
	unsigned short x86_clflush_size;
#ifdef CONFIG_SMP
	unsigned char booted_cores;	/* number of cores as seen by OS */
	__u8 phys_proc_id;		/* Physical processor id. */
	__u8 cpu_core_id;		/* Core id */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
#define X86_VENDOR_UNKNOWN 0xff
/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);
#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif
extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif
/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
					 unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}
#define load_cr3(pgdir) write_cr3(__pa(pgdir))
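/*
 * Usage sketch (illustrative): the context-switch path points CR3 at
 * the incoming mm's page directory:
 *
 *	load_cr3(next->pgd);
 */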
/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4 (unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
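/*
 * Usage sketch (illustrative): enabling global pages once the feature
 * has been detected, roughly as the boot code does:
 *
 *	if (cpu_has_pge)
 *		set_in_cr4(X86_CR4_PGE);
 */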
/*
 * NSC/Cyrix CPU configuration register indexes
 */
#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
/*
 * NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
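/*
 * Usage sketch (illustrative): a read-modify-write of a Cyrix
 * configuration register, e.g. setting MAPEN in CCR3:
 *
 *	unsigned char ccr3 = getCx86(CX86_CCR3);
 *	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
 */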
/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}
static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

/* from system description table in BIOS. Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/* Boot loader type from the setup header */
extern int bootloader_type;
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE	(PAGE_OFFSET)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT
/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS  65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
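/*
 * For reference: 65536 bits / 8 = 8192 bytes; with 4-byte longs on
 * i386 that is 8192 / 4 = 2048 longs (plus one trailing long in the
 * TSS below, for the CPU's access one byte past the end).
 */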
struct i387_fsave_struct {
	long	cwd, swd, twd, fip, fcs, foo, fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};

struct i387_fxsave_struct {
	unsigned short	cwd, swd, twd, fop;
	long	fip, fcs, foo, fos;
	long	mxcsr, mxcsr_mask;
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	long	padding[56];
} __attribute__ ((aligned (16)));

struct i387_soft_struct {
	long	cwd, swd, twd, fip, fcs, foo, fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	struct info	*info;
	unsigned long	entry_eip;
};

union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};
struct thread_struct;

struct tss_struct {
	unsigned short	back_link,__blh;
	unsigned long	esp0;
	unsigned short	ss0,__ss0h;
	unsigned long	esp1;
	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
	unsigned long	esp2;
	unsigned short	ss2,__ss2h;
	unsigned long	__cr3;
	unsigned long	eip;
	unsigned long	eflags;
	unsigned long	eax,ecx,edx,ebx;
	unsigned long	esp;
	unsigned long	ebp;
	unsigned long	esi;
	unsigned long	edi;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));
#define ARCH_MIN_TASKALIGN	16

struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long	esp0;
	unsigned long	sysenter_cs;
	unsigned long	eip;
	unsigned long	esp;
	unsigned long	fs;
	unsigned long	gs;
/* Hardware debugging registers */
	unsigned long	debugreg[8];  /* %%db0-7 debug registers */
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387;
/* virtual 86 mode info */
	struct vm86_struct __user * vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags, v86mask, saved_esp0;
	unsigned int		saved_fs, saved_gs;
/* IO permissions */
	unsigned long	*io_bitmap_ptr;
	unsigned long	iopl;
/* max allowed port in the bitmap, in bytes: */
	unsigned long	io_bitmap_max;
};
#define INIT_THREAD  {						\
	.vm86_info = NULL,					\
	.sysenter_cs = __KERNEL_CS,				\
	.io_bitmap_ptr = NULL,					\
	.gs = __KERNEL_PDA,					\
}
/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {						\
	.esp0		= sizeof(init_stack) + (long)&init_stack, \
	.ss0		= __KERNEL_DS,				\
	.ss1		= __KERNEL_CS,				\
	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\
	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },	\
}
#define start_thread(regs, new_eip, new_esp) do {	\
	__asm__("movl %0,%%fs": :"r" (0));		\
	regs->xgs = 0;					\
	set_fs(USER_DS);				\
	regs->xds = __USER_DS;				\
	regs->xes = __USER_DS;				\
	regs->xss = __USER_DS;				\
	regs->xcs = __USER_CS;				\
	regs->eip = new_eip;				\
	regs->esp = new_esp;				\
} while (0)
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);
#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})
/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the xss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)					\
({								\
	struct pt_regs *__regs__;				\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;						\
})

#define KSTK_EIP(task) (task_pt_regs(task)->eip)
#define KSTK_ESP(task) (task_pt_regs(task)->esp)
struct microcode_header {
	unsigned int hdrver, rev, date;
	unsigned int sig, cksum, ldrver, pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int sig, pf, cksum;
};

struct extended_sigtable {
	unsigned int count, cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()
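/*
 * Usage sketch (illustrative): a polite busy-wait on a condition,
 * assuming "flag" is some volatile word being polled:
 *
 *	while (!*flag)
 *		cpu_relax();
 */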
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_enabled() 0
#define __cpuid native_cpuid
static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;
	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}
/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)			\
	__asm__("movl %%db" #register ", %0"		\
		:"=r" (var))
#define set_debugreg(value, register)			\
	__asm__("movl %0,%%db" #register		\
		: /* no output */			\
		:"r" (value))

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static fastcall inline void native_set_iopl_mask(unsigned mask)
{
	unsigned int reg;
	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
				: "=&r" (reg)
				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
}
/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
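/*
 * Usage sketch (illustrative): leaf 0 returns the vendor string in
 * ebx, edx, ecx ("Genu", "ineI", "ntel" on Intel parts):
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	cpuid(0, &eax, &ebx, &ecx, &edx);
 */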
/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
			       int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;
	cpuid(op, &eax, &ebx, &ecx, &edx);
	return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;
	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;
	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;
	cpuid(op, &eax, &ebx, &ecx, &edx);
	return edx;
}
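/*
 * Usage sketch (illustrative): probe how far extended CPUID goes
 * before reading the extended feature flags:
 *
 *	if (cpuid_eax(0x80000000) >= 0x80000001)
 *		ext_features = cpuid_edx(0x80000001);
 */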
/* generic versions from gas */
#define GENERIC_NOP1	".byte 0x90\n"
#define GENERIC_NOP2	".byte 0x89,0xf6\n"
#define GENERIC_NOP3	".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4	".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5	GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7
/* Opteron nops */
#define K8_NOP1	GENERIC_NOP1
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4

/* K7 nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1	GENERIC_NOP1
#define K7_NOP2	".byte 0x8b,0xc0\n"
#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5	K7_NOP4 ASM_NOP1
#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7	".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8	K7_NOP7 ASM_NOP1
#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif
#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However we don't do prefetches for pre-XP Athlons currently.
   That should be fixed. */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#define spin_lock_prefetch(x)	prefetchw(x)
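/*
 * Usage sketch (illustrative): warm a lock's cache line in exclusive
 * state just before acquiring it ("some_lock" is hypothetical):
 *
 *	spin_lock_prefetch(&some_lock);
 *	spin_lock(&some_lock);
 */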
extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern int init_gdt(int cpu, struct task_struct *idle);
extern void secondary_cpu_init(void);

#endif /* __ASM_I386_PROCESSOR_H */