diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index ea222cfe7b006ccf50d477e9512c6dd12932af92..ab4d0c2a3f8f286b792e2b2b724c6186849fb5e3 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -3,16 +3,32 @@
 
 #include <asm/processor-flags.h>
 
+/* migration helpers, for KVM - will be removed in 2.6.25: */
+#include <asm/vm86.h>
+#define Xgt_desc_struct        desc_ptr
+
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
 
-#include <asm/page.h>
-#include <asm/percpu.h>
+#include <asm/vm86.h>
+#include <asm/math_emu.h>
+#include <asm/segment.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/current.h>
+#include <asm/cpufeature.h>
 #include <asm/system.h>
+#include <asm/page.h>
 #include <asm/percpu.h>
+#include <asm/msr.h>
+#include <asm/desc_defs.h>
+#include <asm/nops.h>
+#include <linux/personality.h>
 #include <linux/cpumask.h>
 #include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/init.h>
 
 /*
  * Default implementation of macro that returns current
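
The "migration helpers" block above is pure source compatibility: code that
still spells the old 32-bit name (KVM is called out) keeps building until the
alias goes away in 2.6.25. A minimal sketch, assuming a hypothetical legacy
caller:

    /* hypothetical out-of-tree user still spelling the old type name;
     * the #define above makes this declare a struct desc_ptr underneath */
    struct Xgt_desc_struct gdt_descr;
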
@@ -74,14 +90,14 @@ struct cpuinfo_x86 {
 #ifdef CONFIG_SMP
        cpumask_t llc_shared_map;       /* cpus sharing the last level cache */
 #endif
-       unsigned char x86_max_cores;    /* cpuid returned max cores value */
-       unsigned char apicid;
-       unsigned short x86_clflush_size;
+       u16 x86_max_cores;              /* cpuid returned max cores value */
+       u16 apicid;
+       u16 x86_clflush_size;
 #ifdef CONFIG_SMP
-       unsigned char booted_cores;     /* number of cores as seen by OS */
-       __u8 phys_proc_id;              /* Physical processor id. */
-       __u8 cpu_core_id;               /* Core id */
-       __u8 cpu_index;                 /* index into per_cpu list */
+       u16 booted_cores;               /* number of cores as seen by OS */
+       u16 phys_proc_id;               /* Physical processor id. */
+       u16 cpu_core_id;                /* Core id */
+       u16 cpu_index;                  /* index into per_cpu list */
 #endif
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
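
The widening above (unsigned char/__u8 to u16) is presumably headroom for
large systems: an APIC or core id above 255 silently truncates in an 8-bit
field. Illustrative values, not from this patch:

    unsigned char old_apicid = 260;  /* wraps to 4 - the failure mode */
    u16           new_apicid = 260;  /* stored intact */
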
 
@@ -102,6 +118,7 @@ struct cpuinfo_x86 {
 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 new_cpu_data;
 extern struct tss_struct doublefault_tss;
+extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
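
For context on cleared_cpu_caps: bits set there are masked out of a CPU's
capability words during setup, pairing with the existing feature-test
machinery. A sketch of the consumer side, using the cpu_has() helper already
in this tree (illustrative, not part of this patch):

    if (cpu_has(&boot_cpu_data, X86_FEATURE_XMM2))
            printk(KERN_INFO "SSE2 available\n");
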
@@ -285,9 +302,13 @@ union i387_union {
 };
 
 #ifdef CONFIG_X86_32
-# include "processor_32.h"
+/*
+ * The following now lives in the per-cpu area:
+ * extern      int cpu_llc_id[NR_CPUS];
+ */
+DECLARE_PER_CPU(u8, cpu_llc_id);
 #else
-# include "processor_64.h"
+DECLARE_PER_CPU(struct orig_ist, orig_ist);
 #endif
 
 extern void print_cpu_info(struct cpuinfo_x86 *);
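
The 32-bit branch above converts cpu_llc_id from a global array into a
per-cpu variable, as its comment notes. For readers, the access pattern
changes roughly like this (illustrative):

    /* was:  u8 llc = cpu_llc_id[cpu]; */
    u8 llc = per_cpu(cpu_llc_id, cpu);
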
@@ -419,6 +440,13 @@ static inline void native_load_sp0(struct tss_struct *tss,
 #endif
 }
 
+static inline void native_swapgs(void)
+{
+#ifdef CONFIG_X86_64
+       asm volatile("swapgs" ::: "memory");
+#endif
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -440,6 +468,7 @@ static inline void load_sp0(struct tss_struct *tss,
 }
 
 #define set_iopl_mask native_set_iopl_mask
+#define SWAPGS swapgs
 #endif /* CONFIG_PARAVIRT */
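
native_swapgs() and the SWAPGS define follow the usual paravirt pattern:
callers use the neutral name, which maps to the raw instruction on bare metal
and to a paravirt op under CONFIG_PARAVIRT. The #ifdef makes the helper a
no-op on 32-bit, since swapgs is a 64-bit-only instruction that exchanges the
GS base with MSR_KERNEL_GS_BASE on kernel entry and exit. A sketch of a
bare-metal caller (illustrative):

    native_swapgs();  /* 64-bit: GS base <-> MSR_KERNEL_GS_BASE; 32-bit: nothing */
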
 
 /*
@@ -650,91 +679,6 @@ extern int bootloader_type;
 extern char ignore_fpu_irq;
 #define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 
-/* generic versions from gas */
-#define GENERIC_NOP1   ".byte 0x90\n"
-#define GENERIC_NOP2           ".byte 0x89,0xf6\n"
-#define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n"
-#define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n"
-#define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4
-#define GENERIC_NOP6   ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP7   ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP8   GENERIC_NOP1 GENERIC_NOP7
-
-/* Opteron nops */
-#define K8_NOP1 GENERIC_NOP1
-#define K8_NOP2        ".byte 0x66,0x90\n"
-#define K8_NOP3        ".byte 0x66,0x66,0x90\n"
-#define K8_NOP4        ".byte 0x66,0x66,0x66,0x90\n"
-#define K8_NOP5        K8_NOP3 K8_NOP2
-#define K8_NOP6        K8_NOP3 K8_NOP3
-#define K8_NOP7        K8_NOP4 K8_NOP3
-#define K8_NOP8        K8_NOP4 K8_NOP4
-
-/* K7 nops */
-/* uses eax dependencies (arbitrary choice) */
-#define K7_NOP1  GENERIC_NOP1
-#define K7_NOP2        ".byte 0x8b,0xc0\n"
-#define K7_NOP3        ".byte 0x8d,0x04,0x20\n"
-#define K7_NOP4        ".byte 0x8d,0x44,0x20,0x00\n"
-#define K7_NOP5        K7_NOP4 ASM_NOP1
-#define K7_NOP6        ".byte 0x8d,0x80,0,0,0,0\n"
-#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
-#define K7_NOP8        K7_NOP7 ASM_NOP1
-
-/* P6 nops */
-/* uses eax dependencies (Intel-recommended choice) */
-#define P6_NOP1        GENERIC_NOP1
-#define P6_NOP2        ".byte 0x66,0x90\n"
-#define P6_NOP3        ".byte 0x0f,0x1f,0x00\n"
-#define P6_NOP4        ".byte 0x0f,0x1f,0x40,0\n"
-#define P6_NOP5        ".byte 0x0f,0x1f,0x44,0x00,0\n"
-#define P6_NOP6        ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
-#define P6_NOP7        ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
-#define P6_NOP8        ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
-
-#ifdef CONFIG_MK7
-#define ASM_NOP1 K7_NOP1
-#define ASM_NOP2 K7_NOP2
-#define ASM_NOP3 K7_NOP3
-#define ASM_NOP4 K7_NOP4
-#define ASM_NOP5 K7_NOP5
-#define ASM_NOP6 K7_NOP6
-#define ASM_NOP7 K7_NOP7
-#define ASM_NOP8 K7_NOP8
-#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
-      defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
-      defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4) || \
-      defined(CONFIG_MPSC)
-#define ASM_NOP1 P6_NOP1
-#define ASM_NOP2 P6_NOP2
-#define ASM_NOP3 P6_NOP3
-#define ASM_NOP4 P6_NOP4
-#define ASM_NOP5 P6_NOP5
-#define ASM_NOP6 P6_NOP6
-#define ASM_NOP7 P6_NOP7
-#define ASM_NOP8 P6_NOP8
-#elif defined(CONFIG_MK8) || defined(CONFIG_X86_64)
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
-#else
-#define ASM_NOP1 GENERIC_NOP1
-#define ASM_NOP2 GENERIC_NOP2
-#define ASM_NOP3 GENERIC_NOP3
-#define ASM_NOP4 GENERIC_NOP4
-#define ASM_NOP5 GENERIC_NOP5
-#define ASM_NOP6 GENERIC_NOP6
-#define ASM_NOP7 GENERIC_NOP7
-#define ASM_NOP8 GENERIC_NOP8
-#endif
-
-#define ASM_NOP_MAX 8
-
 #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
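
The NOP tables deleted above are not simply dropped: the new
#include <asm/nops.h> in the first hunk suggests they now live there, so
existing consumers keep working unchanged. For example, an ASM_NOP* string
can still be emitted directly (illustrative):

    asm volatile(ASM_NOP4);  /* one 4-byte nop, encoding chosen per CPU config */
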
@@ -770,6 +714,124 @@ static inline void prefetchw(const void *x)
 }
 
 #define spin_lock_prefetch(x)  prefetchw(x)
+#ifdef CONFIG_X86_32
+/*
+ * User space process size: 3GB (default).
+ */
+#define TASK_SIZE      (PAGE_OFFSET)
+
+#define INIT_THREAD  {                                                 \
+       .sp0 = sizeof(init_stack) + (long)&init_stack,                  \
+       .vm86_info = NULL,                                              \
+       .sysenter_cs = __KERNEL_CS,                                     \
+       .io_bitmap_ptr = NULL,                                          \
+       .fs = __KERNEL_PERCPU,                                          \
+}
+
+/*
+ * Note that the .io_bitmap member must be extra-big. This is because
+ * the CPU will access an additional byte beyond the end of the IO
+ * permission bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+#define INIT_TSS  {                                                    \
+       .x86_tss = {                                                    \
+               .sp0            = sizeof(init_stack) + (long)&init_stack, \
+               .ss0            = __KERNEL_DS,                          \
+               .ss1            = __KERNEL_CS,                          \
+               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,             \
+        },                                                             \
+       .io_bitmap      = { [0 ... IO_BITMAP_LONGS] = ~0 },             \
+}
+
+#define start_thread(regs, new_eip, new_esp) do {              \
+       __asm__("movl %0,%%gs": :"r" (0));                      \
+       regs->fs = 0;                                           \
+       set_fs(USER_DS);                                        \
+       regs->ds = __USER_DS;                                   \
+       regs->es = __USER_DS;                                   \
+       regs->ss = __USER_DS;                                   \
+       regs->cs = __USER_CS;                                   \
+       regs->ip = new_eip;                                     \
+       regs->sp = new_esp;                                     \
+} while (0)
+
+
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
+#define KSTK_TOP(info)                                                 \
+({                                                                     \
+       unsigned long *__ptr = (unsigned long *)(info);                 \
+       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
+})
+
+/*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessible even if the CPU hasn't stored the SS/ESP registers
+ * on the stack (interrupt gate does not save these registers
+ * when switching to the same priv ring).
+ * Therefore beware: accessing the ss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain
+ * completely wrong values.
+ */
+#define task_pt_regs(task)                                             \
+({                                                                     \
+       struct pt_regs *__regs__;                                       \
+       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
+       __regs__ - 1;                                                   \
+})
+
+#define KSTK_ESP(task) (task_pt_regs(task)->sp)
+
+#else
+/*
+ * User space process size: 47 bits minus one guard page.
+ */
+#define TASK_SIZE64    (0x800000000000UL - 4096)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmaps.
+ */
+#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+                          0xc0000000 : 0xFFFFe000)
+
+#define TASK_SIZE              (test_thread_flag(TIF_IA32) ? \
+                                IA32_PAGE_OFFSET : TASK_SIZE64)
+#define TASK_SIZE_OF(child)    ((test_tsk_thread_flag(child, TIF_IA32)) ? \
+                                 IA32_PAGE_OFFSET : TASK_SIZE64)
+
+#define INIT_THREAD  { \
+       .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+}
+
+#define INIT_TSS  { \
+       .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+}
+
+#define start_thread(regs, new_rip, new_rsp) do {                           \
+       asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));  \
+       load_gs_index(0);                                                    \
+       (regs)->ip = (new_rip);                                              \
+       (regs)->sp = (new_rsp);                                              \
+       write_pda(oldrsp, (new_rsp));                                        \
+       (regs)->cs = __USER_CS;                                              \
+       (regs)->ss = __USER_DS;                                              \
+       (regs)->flags = 0x200;                                               \
+       set_fs(USER_DS);                                                     \
+} while (0)
+
+/*
+ * Return saved PC of a blocked thread.
+ * What is this good for? It will always be the scheduler or ret_from_fork.
+ */
+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
+
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
+#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
+#endif /* CONFIG_X86_64 */
+
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmaps.
  */
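
A worked reading of the 32-bit task_pt_regs() arithmetic above, assuming
THREAD_SIZE is 8 KB (illustrative layout only):

    task_stack_page(task)        /* base of the 8 KB stack area          */
      + THREAD_SIZE              /* KSTK_TOP(): one past the ring-0 top  */
      - 8                        /* reserve so pt_regs' ss/sp slots stay */
                                 /* inside the stack (see comment above) */
      - sizeof(struct pt_regs)   /* task_pt_regs(): the saved registers  */
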