err.no Git - linux-2.6/blobdiff - include/asm-x86/paravirt.h
x86: re-add rdmsrl_safe
[linux-2.6] / include / asm-x86 / paravirt.h
index 73547acbbbf5ce11165870f35caa238c0059fc20..3d419398499b4a6fe14de3eec92761f2e80ba19a 100644 (file)
@@ -221,7 +221,7 @@ struct pv_mmu_ops {
 
        /* Hooks for allocating/releasing pagetable pages */
        void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
-       void (*alloc_pd)(u32 pfn);
+       void (*alloc_pd)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*release_pt)(u32 pfn);
        void (*release_pd)(u32 pfn);
@@ -231,7 +231,8 @@ struct pv_mmu_ops {
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
-       void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+       void (*pte_update)(struct mm_struct *mm, unsigned long addr,
+                          pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);
 
@@ -246,7 +247,8 @@ struct pv_mmu_ops {
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
-       void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+       void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
+                         pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);
 
 #endif /* CONFIG_X86_PAE */
@@ -259,6 +261,8 @@ struct pv_mmu_ops {
 #if PAGETABLE_LEVELS == 4
        pudval_t (*pud_val)(pud_t);
        pud_t (*make_pud)(pudval_t pud);
+
+       void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
 #endif /* PAGETABLE_LEVELS == 4 */
 #endif /* PAGETABLE_LEVELS >= 3 */
 
@@ -272,8 +276,7 @@ struct pv_mmu_ops {
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */
-struct paravirt_patch_template
-{
+struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
@@ -619,6 +622,7 @@ static inline void write_cr4(unsigned long x)
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
 }
 
+#ifdef CONFIG_X86_64
 static inline unsigned long read_cr8(void)
 {
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
@@ -628,6 +632,7 @@ static inline void write_cr8(unsigned long x)
 {
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
 }
+#endif
 
 static inline void raw_safe_halt(void)
 {
@@ -656,43 +661,56 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 }
 
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
-#define rdmsr(msr,val1,val2) do {              \
+#define rdmsr(msr, val1, val2)                 \
+do {                                           \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
-} while(0)
+} while (0)
 
-#define wrmsr(msr,val1,val2) do {              \
+#define wrmsr(msr, val1, val2)                 \
+do {                                           \
        paravirt_write_msr(msr, val1, val2);    \
-} while(0)
+} while (0)
 
-#define rdmsrl(msr,val) do {                   \
+#define rdmsrl(msr, val)                       \
+do {                                           \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
-} while(0)
+} while (0)
 
-#define wrmsrl(msr,val)                wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
-#define wrmsr_safe(msr,a,b)    paravirt_write_msr(msr, a, b)
+#define wrmsrl(msr, val)       wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
+#define wrmsr_safe(msr, a, b)  paravirt_write_msr(msr, a, b)
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr,a,b) ({                 \
+#define rdmsr_safe(msr, a, b)                  \
+({                                             \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
-       _err; })
+       _err;                                   \
+})
 
+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+       int err;
+
+       *p = paravirt_read_msr(msr, &err);
+       return err;
+}
 
 static inline u64 paravirt_read_tsc(void)
 {
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
 }
 
-#define rdtscl(low) do {                       \
+#define rdtscl(low)                            \
+do {                                           \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
-} while(0)
+} while (0)
 
 #define rdtscll(val) (val = paravirt_read_tsc())
 
@@ -707,11 +725,12 @@ static inline unsigned long long paravirt_read_pmc(int counter)
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
 }
 
-#define rdpmc(counter,low,high) do {           \
+#define rdpmc(counter, low, high)              \
+do {                                           \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
-} while(0)
+} while (0)
 
 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
 {
@@ -790,7 +809,8 @@ static inline void set_iopl_mask(unsigned mask)
 }
 
 /* The paravirtualized I/O functions */
-static inline void slow_down_io(void) {
+static inline void slow_down_io(void)
+{
        pv_cpu_ops.io_delay();
 #ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
@@ -899,9 +919,9 @@ static inline void paravirt_release_pt(unsigned pfn)
        PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
 }
 
-static inline void paravirt_alloc_pd(unsigned pfn)
+static inline void paravirt_alloc_pd(struct mm_struct *mm, unsigned pfn)
 {
-       PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn);
+       PVOP_VCALL2(pv_mmu_ops.alloc_pd, mm, pfn);
 }
 
 static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
@@ -1023,6 +1043,101 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
 }
 
+#if PAGETABLE_LEVELS >= 3
+static inline pmd_t __pmd(pmdval_t val)
+{
+       pmdval_t ret;
+
+       if (sizeof(pmdval_t) > sizeof(long))
+               ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
+                                val, (u64)val >> 32);
+       else
+               ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
+                                val);
+
+       return (pmd_t) { ret };
+}
+
+static inline pmdval_t pmd_val(pmd_t pmd)
+{
+       pmdval_t ret;
+
+       if (sizeof(pmdval_t) > sizeof(long))
+               ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
+                                 pmd.pmd, (u64)pmd.pmd >> 32);
+       else
+               ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
+                                 pmd.pmd);
+
+       return ret;
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+       pudval_t val = native_pud_val(pud);
+
+       if (sizeof(pudval_t) > sizeof(long))
+               PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
+                           val, (u64)val >> 32);
+       else
+               PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
+                           val);
+}
+#if PAGETABLE_LEVELS == 4
+static inline pud_t __pud(pudval_t val)
+{
+       pudval_t ret;
+
+       if (sizeof(pudval_t) > sizeof(long))
+               ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
+                                val, (u64)val >> 32);
+       else
+               ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
+                                val);
+
+       return (pud_t) { ret };
+}
+
+static inline pudval_t pud_val(pud_t pud)
+{
+       pudval_t ret;
+
+       if (sizeof(pudval_t) > sizeof(long))
+               ret =  PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
+                                 pud.pud, (u64)pud.pud >> 32);
+       else
+               ret =  PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
+                                 pud.pud);
+
+       return ret;
+}
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+       pgdval_t val = native_pgd_val(pgd);
+
+       if (sizeof(pgdval_t) > sizeof(long))
+               PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
+                           val, (u64)val >> 32);
+       else
+               PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
+                           val);
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+       set_pgd(pgdp, __pgd(0));
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+       set_pud(pudp, __pud(0));
+}
+
+#endif /* PAGETABLE_LEVELS == 4 */
+
+#endif /* PAGETABLE_LEVELS >= 3 */
+
 #ifdef CONFIG_X86_PAE
 /* Special-case pte-setting operations for PAE, which can't update a
    64-bit pte atomically */
@@ -1073,48 +1188,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 }
 #endif /* CONFIG_X86_PAE */
 
-#if PAGETABLE_LEVELS >= 3
-static inline pmd_t __pmd(pmdval_t val)
-{
-       pmdval_t ret;
-
-       if (sizeof(pmdval_t) > sizeof(long))
-               ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
-                                val, (u64)val >> 32);
-       else
-               ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
-                                val);
-
-       return (pmd_t) { ret };
-}
-
-static inline pmdval_t pmd_val(pmd_t pmd)
-{
-       pmdval_t ret;
-
-       if (sizeof(pmdval_t) > sizeof(long))
-               ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
-                                 pmd.pmd, (u64)pmd.pmd >> 32);
-       else
-               ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
-                                 pmd.pmd);
-
-       return ret;
-}
-
-static inline void set_pud(pud_t *pudp, pud_t pud)
-{
-       pudval_t val = native_pud_val(pud);
-
-       if (sizeof(pudval_t) > sizeof(long))
-               PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
-                           val, (u64)val >> 32);
-       else
-               PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
-                           val);
-}
-#endif /* PAGETABLE_LEVELS >= 3 */
-
 /* Lazy mode for batching updates / context switch */
 enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,