From: Glauber de Oliveira Costa
Date: Wed, 30 Jan 2008 12:31:31 +0000 (+0100)
Subject: x86: unify tss_struct
X-Git-Tag: v2.6.25-rc1~1143^2~540
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ca241c75037b32e0216a68e39ad2801d04fa1f87;p=linux-2.6

x86: unify tss_struct

Although slightly different, the tss_struct is very similar on x86_64 and
i386. The part that really differs, which matches the hardware view of it,
is now called x86_hw_tss, and each architecture provides its own. It is
then used as a field in the outer tss_struct.

Signed-off-by: Glauber de Oliveira Costa
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
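Before the diff itself, a minimal user-space sketch of the layout this patch introduces may help. The x86_hw_tss field list below is the 64-bit variant copied from the processor.h hunk; the tss_struct wrapper is trimmed to a single software field, and the program is purely illustrative, not kernel code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hardware-defined part: the in-memory TSS layout the CPU expects. */
struct x86_hw_tss {
	uint32_t reserved1;
	uint64_t sp0;
	uint64_t sp1;
	uint64_t sp2;
	uint64_t reserved2;
	uint64_t ist[7];
	uint32_t reserved3;
	uint32_t reserved4;
	uint16_t reserved5;
	uint16_t io_bitmap_base;
} __attribute__((packed));

/* Software wrapper: hardware part first, kernel-only bookkeeping after it. */
struct tss_struct {
	struct x86_hw_tss x86_tss;
	unsigned long io_bitmap[65536 / 8 / sizeof(long) + 1];
};

int main(void)
{
	/*
	 * Because x86_tss is the first member, the hardware fields keep
	 * their offsets; only the C expressions change, e.g. tss->sp0
	 * becomes tss->x86_tss.sp0 in the hunks below.
	 */
	printf("sp0 at offset %zu, ist at offset %zu\n",
	       offsetof(struct tss_struct, x86_tss.sp0),
	       offsetof(struct tss_struct, x86_tss.ist));
	return 0;
}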
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index a054287643..2b32719a3f 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -110,7 +110,7 @@ int main(void)
 	ENTRY(cr8);
 	BLANK();
 #undef ENTRY
-	DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
+	DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
 	BLANK();
 	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
 	BLANK();
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
index d16122a8e4..a47798b59f 100644
--- a/arch/x86/kernel/doublefault_32.c
+++ b/arch/x86/kernel/doublefault_32.c
@@ -33,7 +33,7 @@ static void doublefault_fn(void)
 	printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
 
 	if (ptr_ok(tss)) {
-		struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
+		struct x86_hw_tss *t = (struct x86_hw_tss *)tss;
 
 		printk(KERN_EMERG "eip = %08lx, esp = %08lx\n",
 			t->ip, t->sp);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 836a71adfa..af56104b73 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -639,7 +639,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
-	tss->sp0 = next->sp0;
+	tss->x86_tss.sp0 = next->sp0;
 
 	/*
 	 * Switch DS and ES.
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index 05cafcb941..3b0ffa31f3 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -258,10 +258,10 @@ void __cpuinit cpu_init (void)
 				v, cpu);
 		}
 		estacks += PAGE_SIZE << order[v];
-		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
+		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
 	}
 
-	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
 	/*
 	 * <= is required because the CPU will access up to
 	 * 8 bits beyond the end of the IO permission bitmap.
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 4c03ddccd6..2ea02a71b6 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -614,7 +614,7 @@ do_rest:
 	start_rip = setup_trampoline();
 
 	init_rsp = c_idle.idle->thread.sp;
-	per_cpu(init_tss,cpu).sp0 = init_rsp;
+	per_cpu(init_tss, cpu).x86_tss.sp0 = init_rsp;
 	initial_code = start_secondary;
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
index 3585a1628b..1c8367a692 100644
--- a/include/asm-x86/lguest.h
+++ b/include/asm-x86/lguest.h
@@ -51,7 +51,7 @@ struct lguest_ro_state
 	/* Fields which are used when guest is running. */
 	struct desc_ptr guest_idt_desc;
 	struct desc_ptr guest_gdt_desc;
-	struct i386_hw_tss guest_tss;
+	struct x86_hw_tss guest_tss;
 	struct desc_struct guest_idt[IDT_ENTRIES];
 	struct desc_struct guest_gdt[GDT_ENTRIES];
 };
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 3deb5ba55f..cede9ad3dc 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -8,6 +8,7 @@ struct task_struct;
 struct mm_struct;
 
 #include
+#include
 #include
 
 /*
@@ -38,6 +39,82 @@ static inline void load_cr3(pgd_t *pgdir)
 	write_cr3(__pa(pgdir));
 }
 
+#ifdef CONFIG_X86_32
+/* This is the TSS defined by the hardware. */
+struct x86_hw_tss {
+	unsigned short back_link, __blh;
+	unsigned long sp0;
+	unsigned short ss0, __ss0h;
+	unsigned long sp1;
+	unsigned short ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
+	unsigned long sp2;
+	unsigned short ss2, __ss2h;
+	unsigned long __cr3;
+	unsigned long ip;
+	unsigned long flags;
+	unsigned long ax, cx, dx, bx;
+	unsigned long sp, bp, si, di;
+	unsigned short es, __esh;
+	unsigned short cs, __csh;
+	unsigned short ss, __ssh;
+	unsigned short ds, __dsh;
+	unsigned short fs, __fsh;
+	unsigned short gs, __gsh;
+	unsigned short ldt, __ldth;
+	unsigned short trace, io_bitmap_base;
+} __attribute__((packed));
+#else
+struct x86_hw_tss {
+	u32 reserved1;
+	u64 sp0;
+	u64 sp1;
+	u64 sp2;
+	u64 reserved2;
+	u64 ist[7];
+	u32 reserved3;
+	u32 reserved4;
+	u16 reserved5;
+	u16 io_bitmap_base;
+} __attribute__((packed)) ____cacheline_aligned;
+#endif
+
+/*
+ * Size of io_bitmap.
+ */
+#define IO_BITMAP_BITS 65536
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
+
+struct tss_struct {
+	struct x86_hw_tss x86_tss;
+
+	/*
+	 * The extra 1 is there because the CPU will access an
+	 * additional byte beyond the end of the IO permission
+	 * bitmap. The extra byte must be all 1 bits, and must
+	 * be within the limit.
+	 */
+	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+	/*
+	 * Cache the current maximum and the last task that used the bitmap:
+	 */
+	unsigned long io_bitmap_max;
+	struct thread_struct *io_bitmap_owner;
+	/*
+	 * pads the TSS to be cacheline-aligned (size is 0x100)
+	 */
+	unsigned long __cacheline_filler[35];
+	/*
+	 * .. and then another 0x100 bytes for emergency kernel stack
+	 */
+	unsigned long stack[64];
+} __attribute__((packed));
+
+DECLARE_PER_CPU(struct tss_struct, init_tss);
+
 #ifdef CONFIG_X86_32
 # include "processor_32.h"
 #else
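As a quick cross-check of the io_bitmap constants the unified header now carries, here is the sizing arithmetic recomputed in a standalone program (illustrative only; the macro names match the header, the rest is not kernel code):

#include <stdio.h>

#define IO_BITMAP_BITS  65536			/* one bit per I/O port */
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)	/* 8192 bytes */
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))

int main(void)
{
	/*
	 * The array in tss_struct is IO_BITMAP_LONGS + 1: the CPU may read
	 * one byte past the end of the bitmap, so an extra all-ones long
	 * keeps that access inside the structure and denies those ports.
	 */
	printf("bytes = %d, longs = %zu, array entries = %zu\n",
	       IO_BITMAP_BYTES, IO_BITMAP_LONGS, IO_BITMAP_LONGS + 1);
	return 0;
}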
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 6cd2149dcb..57b345bc3c 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -81,7 +81,6 @@ struct cpuinfo_x86 {
 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 new_cpu_data;
 extern struct tss_struct doublefault_tss;
-DECLARE_PER_CPU(struct tss_struct, init_tss);
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
@@ -123,16 +122,6 @@ extern unsigned int mca_pentium_flag;
 
 #define TASK_SIZE	(PAGE_OFFSET)
 
-/*
- * Size of io_bitmap.
- */
-#define IO_BITMAP_BITS 65536
-#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
-#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
-
 struct i387_fsave_struct {
 	long cwd;
 	long swd;
@@ -185,57 +174,6 @@ typedef struct {
 	unsigned long seg;
 } mm_segment_t;
 
-struct thread_struct;
-
-/* This is the TSS defined by the hardware. */
-struct i386_hw_tss {
-	unsigned short back_link,__blh;
-	unsigned long sp0;
-	unsigned short ss0,__ss0h;
-	unsigned long sp1;
-	unsigned short ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
-	unsigned long sp2;
-	unsigned short ss2,__ss2h;
-	unsigned long __cr3;
-	unsigned long ip;
-	unsigned long flags;
-	unsigned long ax, cx, dx, bx;
-	unsigned long sp, bp, si, di;
-	unsigned short es, __esh;
-	unsigned short cs, __csh;
-	unsigned short ss, __ssh;
-	unsigned short ds, __dsh;
-	unsigned short fs, __fsh;
-	unsigned short gs, __gsh;
-	unsigned short ldt, __ldth;
-	unsigned short trace, io_bitmap_base;
-} __attribute__((packed));
-
-struct tss_struct {
-	struct i386_hw_tss x86_tss;
-
-	/*
-	 * The extra 1 is there because the CPU will access an
-	 * additional byte beyond the end of the IO permission
-	 * bitmap. The extra byte must be all 1 bits, and must
-	 * be within the limit.
-	 */
-	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
-	/*
-	 * Cache the current maximum and the last task that used the bitmap:
-	 */
-	unsigned long io_bitmap_max;
-	struct thread_struct *io_bitmap_owner;
-	/*
-	 * pads the TSS to be cacheline-aligned (size is 0x100)
-	 */
-	unsigned long __cacheline_filler[35];
-	/*
-	 * .. and then another 0x100 bytes for emergency kernel stack
-	 */
-	unsigned long stack[64];
-} __attribute__((packed));
-
 #define ARCH_MIN_TASKALIGN	16
 
 struct thread_struct {
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h
index 1984a4a38b..8d342c23ad 100644
--- a/include/asm-x86/processor_64.h
+++ b/include/asm-x86/processor_64.h
@@ -91,14 +91,6 @@ extern void identify_cpu(struct cpuinfo_x86 *);
 #define TASK_SIZE	(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
 #define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
 
-/*
- * Size of io_bitmap.
- */
-#define IO_BITMAP_BITS 65536
-#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-#define INVALID_IO_BITMAP_OFFSET 0x8000
 
 struct i387_fxsave_struct {
 	u16 cwd;
@@ -118,32 +110,7 @@ union i387_union {
 	struct i387_fxsave_struct fxsave;
 };
 
-struct tss_struct {
-	u32 reserved1;
-	u64 sp0;
-	u64 sp1;
-	u64 sp2;
-	u64 reserved2;
-	u64 ist[7];
-	u32 reserved3;
-	u32 reserved4;
-	u16 reserved5;
-	u16 io_bitmap_base;
-	/*
-	 * The extra 1 is there because the CPU will access an
-	 * additional byte beyond the end of the IO permission
-	 * bitmap. The extra byte must be all 1 bits, and must
-	 * be within the limit. Thus we have:
-	 *
-	 *   128 bytes, the bitmap itself, for ports 0..0x3ff
-	 *   8 bytes, for an extra "long" of ~0UL
-	 */
-	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
-} __attribute__((packed)) ____cacheline_aligned;
-
-
 extern struct cpuinfo_x86 boot_cpu_data;
-DECLARE_PER_CPU(struct tss_struct,init_tss);
 /* Save the original ist values for checking stack pointers during debugging */
 struct orig_ist {
 	unsigned long ist[7];
@@ -195,7 +162,7 @@ struct thread_struct {
 }
 
 #define INIT_TSS { \
-	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
 #define INIT_MMAP \
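The INIT_TSS change in the final hunk relies on a nested designated initializer; here is a small standalone sketch of that C feature (the structures are cut down and the value is a stand-in for &init_stack + sizeof(init_stack), not kernel code):

#include <stdio.h>

struct x86_hw_tss {
	unsigned long sp0;
};

struct tss_struct {
	struct x86_hw_tss x86_tss;
	unsigned long io_bitmap_max;
};

/* A nested designator initializes a member of an embedded struct directly. */
static struct tss_struct demo_tss = {
	.x86_tss.sp0 = 0x1000,	/* stand-in value, not the kernel's init_stack */
};

int main(void)
{
	printf("sp0 = %#lx\n", demo_tss.x86_tss.sp0);
	return 0;
}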