diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index c56b17a5eb..0f13b945e2 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -101,6 +101,11 @@ struct pv_cpu_ops {
 	unsigned long (*read_cr4)(void);
 	void (*write_cr4)(unsigned long);
 
+#ifdef CONFIG_X86_64
+	unsigned long (*read_cr8)(void);
+	void (*write_cr8)(unsigned long);
+#endif
+
 	/* Segment descriptor handling */
 	void (*load_tr_desc)(void);
 	void (*load_gdt)(const struct desc_ptr *);
@@ -215,18 +220,21 @@ struct pv_mmu_ops {
 				 unsigned long va);
 
 	/* Hooks for allocating/releasing pagetable pages */
-	void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pd)(u32 pfn);
-	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
-	void (*release_pt)(u32 pfn);
-	void (*release_pd)(u32 pfn);
+	void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
+	void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
+	void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
+	void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
+	void (*release_pte)(u32 pfn);
+	void (*release_pmd)(u32 pfn);
+	void (*release_pud)(u32 pfn);
 
 	/* Pagetable manipulation functions */
 	void (*set_pte)(pte_t *ptep, pte_t pteval);
 	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep, pte_t pteval);
 	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
-	void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
+			   pte_t *ptep);
 	void (*pte_update_defer)(struct mm_struct *mm, unsigned long addr,
 				 pte_t *ptep);
 
@@ -241,7 +249,8 @@ struct pv_mmu_ops {
 	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
 	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte);
-	void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
+			  pte_t *ptep);
 	void (*pmd_clear)(pmd_t *pmdp);
 
 #endif /* CONFIG_X86_PAE */
@@ -254,6 +263,8 @@ struct pv_mmu_ops {
 #if PAGETABLE_LEVELS == 4
 	pudval_t (*pud_val)(pud_t);
 	pud_t (*make_pud)(pudval_t pud);
+
+	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
 #endif /* PAGETABLE_LEVELS == 4 */
 #endif /* PAGETABLE_LEVELS >= 3 */
 
@@ -267,8 +278,7 @@ struct pv_mmu_ops {
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */
-struct paravirt_patch_template
-{
+struct paravirt_patch_template {
 	struct pv_init_ops pv_init_ops;
 	struct pv_time_ops pv_time_ops;
 	struct pv_cpu_ops pv_cpu_ops;
@@ -614,6 +624,18 @@ static inline void write_cr4(unsigned long x)
 	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
 }
 
+#ifdef CONFIG_X86_64
+static inline unsigned long read_cr8(void)
+{
+	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
+}
+
+static inline void write_cr8(unsigned long x)
+{
+	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
+}
+#endif
+
 static inline void raw_safe_halt(void)
 {
 	PVOP_VCALL0(pv_irq_ops.safe_halt);
@@ -641,43 +663,56 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 }
 
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
-#define rdmsr(msr,val1,val2) do {		\
+#define rdmsr(msr, val1, val2)			\
+do {						\
 	int _err;				\
 	u64 _l = paravirt_read_msr(msr, &_err);	\
 	val1 = (u32)_l;				\
 	val2 = _l >> 32;			\
-} while(0)
+} while (0)
 
-#define wrmsr(msr,val1,val2) do {		\
+#define wrmsr(msr, val1, val2)			\
+do {						\
 	paravirt_write_msr(msr, val1, val2);	\
-} while(0)
+} while (0)
 
-#define rdmsrl(msr,val) do {			\
+#define rdmsrl(msr, val)			\
+do {						\
 	int _err;				\
 	val = paravirt_read_msr(msr, &_err);	\
-} while(0)
+} while (0)
 
-#define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
-#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b)
+#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
+#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr,a,b) ({			\
+#define rdmsr_safe(msr, a, b)			\
+({						\
 	int _err;				\
 	u64 _l = paravirt_read_msr(msr, &_err);	\
 	(*a) = (u32)_l;				\
 	(*b) = _l >> 32;			\
-	_err; })
+	_err;					\
+})
+
+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+	*p = paravirt_read_msr(msr, &err);
+	return err;
+}
 
 static inline u64 paravirt_read_tsc(void)
 {
 	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
 }
 
-#define rdtscl(low) do {			\
+#define rdtscl(low)				\
+do {						\
 	u64 _l = paravirt_read_tsc();		\
 	low = (int)_l;				\
-} while(0)
+} while (0)
 
 #define rdtscll(val) (val = paravirt_read_tsc())
 
@@ -692,11 +727,12 @@ static inline unsigned long long paravirt_read_pmc(int counter)
 	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
 }
 
-#define rdpmc(counter,low,high) do {		\
+#define rdpmc(counter, low, high)		\
+do {						\
 	u64 _l = paravirt_read_pmc(counter);	\
 	low = (u32)_l;				\
 	high = _l >> 32;			\
-} while(0)
+} while (0)
 
 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
 {
@@ -775,7 +811,8 @@ static inline void set_iopl_mask(unsigned mask)
 }
 
 /* The paravirtualized I/O functions */
-static inline void slow_down_io(void) {
+static inline void slow_down_io(void)
+{
 	pv_cpu_ops.io_delay();
 #ifdef REALLY_SLOW_IO
 	pv_cpu_ops.io_delay();
@@ -875,28 +912,37 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
 }
 
-static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
+	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
 }
-static inline void paravirt_release_pt(unsigned pfn)
+static inline void paravirt_release_pte(unsigned pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
+	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
 }
 
-static inline void paravirt_alloc_pd(unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn);
+	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }
 
-static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
-					   unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
+					    unsigned start, unsigned count)
 {
-	PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
+	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
 }
 
-static inline void paravirt_release_pd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
+	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
+}
+
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+{
+	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
+}
+static inline void paravirt_release_pud(unsigned pfn)
+{
+	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
 
 #ifdef CONFIG_HIGHPTE
@@ -1008,6 +1054,101 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 	PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
 }
 
+#if PAGETABLE_LEVELS >= 3
+static inline pmd_t __pmd(pmdval_t val)
+{
+	pmdval_t ret;
+
+	if (sizeof(pmdval_t) > sizeof(long))
+		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
+				 val, (u64)val >> 32);
+	else
+		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
+				 val);
+
+	return (pmd_t) { ret };
+}
+
+static inline pmdval_t pmd_val(pmd_t pmd)
+{
+	pmdval_t ret;
+
+	if (sizeof(pmdval_t) > sizeof(long))
+		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
+				 pmd.pmd, (u64)pmd.pmd >> 32);
+	else
+		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
+				 pmd.pmd);
+
+	return ret;
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+	pudval_t val = native_pud_val(pud);
+
+	if (sizeof(pudval_t) > sizeof(long))
+		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
+			    val, (u64)val >> 32);
+	else
+		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
+			    val);
+}
+#if PAGETABLE_LEVELS == 4
+static inline pud_t __pud(pudval_t val)
+{
+	pudval_t ret;
+
+	if (sizeof(pudval_t) > sizeof(long))
+		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
+				 val, (u64)val >> 32);
+	else
+		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
+				 val);
+
+	return (pud_t) { ret };
+}
+
+static inline pudval_t pud_val(pud_t pud)
+{
+	pudval_t ret;
+
+	if (sizeof(pudval_t) > sizeof(long))
+		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
+				 pud.pud, (u64)pud.pud >> 32);
+	else
+		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
+				 pud.pud);
+
+	return ret;
+}
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	pgdval_t val = native_pgd_val(pgd);
+
+	if (sizeof(pgdval_t) > sizeof(long))
+		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
+			    val, (u64)val >> 32);
+	else
+		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
+			    val);
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+	set_pgd(pgdp, __pgd(0));
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+	set_pud(pudp, __pud(0));
+}
+
+#endif /* PAGETABLE_LEVELS == 4 */
+
+#endif /* PAGETABLE_LEVELS >= 3 */
+
 #ifdef CONFIG_X86_PAE
 /* Special-case pte-setting operations for PAE, which can't
  * update a 64-bit pte atomically */
@@ -1058,48 +1199,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 }
 #endif /* CONFIG_X86_PAE */
 
-#if PAGETABLE_LEVELS >= 3
-static inline pmd_t __pmd(pmdval_t val)
-{
-	pmdval_t ret;
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
-				 val, (u64)val >> 32);
-	else
-		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
-				 val);
-
-	return (pmd_t) { ret };
-}
-
-static inline pmdval_t pmd_val(pmd_t pmd)
-{
-	pmdval_t ret;
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
-				 pmd.pmd, (u64)pmd.pmd >> 32);
-	else
-		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
-				 pmd.pmd);
-
-	return ret;
-}
-
-static inline void set_pud(pud_t *pudp, pud_t pud)
-{
-	pudval_t val = native_pud_val(pud);
-
-	if (sizeof(pudval_t) > sizeof(long))
-		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
-			    val, (u64)val >> 32);
-	else
-		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
-			    val);
-}
-#endif /* PAGETABLE_LEVELS >= 3 */
-
 /* Lazy mode for batching updates / context switch */
 enum paravirt_lazy_mode {
 	PARAVIRT_LAZY_NONE,