x86: get mp_bus_to_node early
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 8f7984319c30e0c7454d01c726ce6b528371a28f..0f13b945e2400323cb65dfd6f8a7cc5b035c3319 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -5,13 +5,27 @@
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/page.h>
+#include <asm/asm.h>
 
 /* Bitmask of what can be clobbered: usually at least eax. */
-#define CLBR_NONE 0x0
-#define CLBR_EAX 0x1
-#define CLBR_ECX 0x2
-#define CLBR_EDX 0x4
-#define CLBR_ANY 0x7
+#define CLBR_NONE 0
+#define CLBR_EAX  (1 << 0)
+#define CLBR_ECX  (1 << 1)
+#define CLBR_EDX  (1 << 2)
+
+#ifdef CONFIG_X86_64
+#define CLBR_RSI  (1 << 3)
+#define CLBR_RDI  (1 << 4)
+#define CLBR_R8   (1 << 5)
+#define CLBR_R9   (1 << 6)
+#define CLBR_R10  (1 << 7)
+#define CLBR_R11  (1 << 8)
+#define CLBR_ANY  ((1 << 9) - 1)
+#include <asm/desc_defs.h>
+#else
+/* CLBR_ANY must cover every register the platform has; for i386 that's just the three above. */
+#define CLBR_ANY  ((1 << 3) - 1)
+#endif /* X86_64 */
 
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
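The clobber constants above now form a one-bit-per-register mask, so a call site can OR together exactly the registers it may trash, and CLBR_ANY is simply all bits below the register count. A minimal standalone sketch of the i386 encoding (values copied from the hunk above):

#include <stdio.h>

#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_ANY  ((1 << 3) - 1)

int main(void)
{
        unsigned clobbers = CLBR_EAX | CLBR_EDX;

        printf("clobbers=%#x any=%#x\n", clobbers, CLBR_ANY);  /* 0x5 0x7 */
        return 0;
}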
@@ -87,6 +101,11 @@ struct pv_cpu_ops {
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);
 
+#ifdef CONFIG_X86_64
+       unsigned long (*read_cr8)(void);
+       void (*write_cr8)(unsigned long);
+#endif
+
        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
@@ -126,6 +145,8 @@ struct pv_cpu_ops {
        void (*irq_enable_syscall_ret)(void);
        void (*iret)(void);
 
+       void (*swapgs)(void);
+
        struct pv_lazy_ops lazy_mode;
 };
 
@@ -199,43 +220,53 @@ struct pv_mmu_ops {
                                 unsigned long va);
 
        /* Hooks for allocating/releasing pagetable pages */
-       void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
-       void (*alloc_pd)(u32 pfn);
-       void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
-       void (*release_pt)(u32 pfn);
-       void (*release_pd)(u32 pfn);
+       void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
+       void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
+       void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
+       void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
+       void (*release_pte)(u32 pfn);
+       void (*release_pmd)(u32 pfn);
+       void (*release_pud)(u32 pfn);
 
        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
-       void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+       void (*pte_update)(struct mm_struct *mm, unsigned long addr,
+                          pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);
 
+       pteval_t (*pte_val)(pte_t);
+       pte_t (*make_pte)(pteval_t pte);
+
+       pgdval_t (*pgd_val)(pgd_t);
+       pgd_t (*make_pgd)(pgdval_t pgd);
+
+#if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
-       void (*set_pud)(pud_t *pudp, pud_t pudval);
-       void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+       void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
+                         pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);
 
-       unsigned long long (*pte_val)(pte_t);
-       unsigned long long (*pmd_val)(pmd_t);
-       unsigned long long (*pgd_val)(pgd_t);
+#endif /* CONFIG_X86_PAE */
 
-       pte_t (*make_pte)(unsigned long long pte);
-       pmd_t (*make_pmd)(unsigned long long pmd);
-       pgd_t (*make_pgd)(unsigned long long pgd);
-#else
-       unsigned long (*pte_val)(pte_t);
-       unsigned long (*pgd_val)(pgd_t);
+       void (*set_pud)(pud_t *pudp, pud_t pudval);
 
-       pte_t (*make_pte)(unsigned long pte);
-       pgd_t (*make_pgd)(unsigned long pgd);
-#endif
+       pmdval_t (*pmd_val)(pmd_t);
+       pmd_t (*make_pmd)(pmdval_t pmd);
+
+#if PAGETABLE_LEVELS == 4
+       pudval_t (*pud_val)(pud_t);
+       pud_t (*make_pud)(pudval_t pud);
+
+       void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
+#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* PAGETABLE_LEVELS >= 3 */
 
 #ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
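With the hooks now typed in terms of pteval_t/pgdval_t rather than open-coded unsigned long long, a native (bare-metal) backend reduces to identity conversions. A minimal sketch, assuming pte_t wraps a single pteval_t word as on non-PAE builds; the function names are illustrative, not the kernel's:

/* Illustrative identity implementations a native backend could install
 * into pv_mmu_ops.pte_val and pv_mmu_ops.make_pte. */
static pteval_t native_pte_val_sketch(pte_t pte)
{
        return pte.pte;                 /* no translation on bare metal */
}

static pte_t native_make_pte_sketch(pteval_t val)
{
        return (pte_t) { .pte = val };
}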
@@ -247,8 +278,7 @@ struct pv_mmu_ops {
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */
-struct paravirt_patch_template
-{
+struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
@@ -281,7 +311,8 @@ extern struct pv_mmu_ops pv_mmu_ops;
 #define _paravirt_alt(insn_string, type, clobber)      \
        "771:\n\t" insn_string "\n" "772:\n"            \
        ".pushsection .parainstructions,\"a\"\n"        \
-       "  .long 771b\n"                                \
+       _ASM_ALIGN "\n"                                 \
+       _ASM_PTR " 771b\n"                              \
        "  .byte " type "\n"                            \
        "  .byte 772b-771b\n"                           \
        "  .short " clobber "\n"                        \
@@ -291,6 +322,11 @@ extern struct pv_mmu_ops pv_mmu_ops;
 #define paravirt_alt(insn_string)                                      \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
 
+/* Simple instruction patching code. */
+#define DEF_NATIVE(ops, name, code)                                    \
+       extern const char start_##ops##_##name[], end_##ops##_##name[]; \
+       asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
+
 unsigned paravirt_patch_nop(void);
 unsigned paravirt_patch_ignore(unsigned len);
 unsigned paravirt_patch_call(void *insnbuf,
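DEF_NATIVE captures the raw bytes of a native instruction sequence between a start and an end label, so native_patch() can copy them directly over a pv-op call site. A typical use, as in the native patching code:

DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
/* which expands to:
 *   extern const char start_pv_irq_ops_irq_disable[],
 *                     end_pv_irq_ops_irq_disable[];
 *   asm("start_pv_irq_ops_irq_disable: cli; end_pv_irq_ops_irq_disable:");
 */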
@@ -305,6 +341,9 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);
 
+unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+                     unsigned long addr, unsigned len);
+
 int paravirt_disable_iospace(void);
 
 /*
@@ -585,6 +624,18 @@ static inline void write_cr4(unsigned long x)
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
 }
 
+#ifdef CONFIG_X86_64
+static inline unsigned long read_cr8(void)
+{
+       return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
+}
+
+static inline void write_cr8(unsigned long x)
+{
+       PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
+}
+#endif
+
 static inline void raw_safe_halt(void)
 {
        PVOP_VCALL0(pv_irq_ops.safe_halt);
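On 64-bit, CR8 shadows the local APIC task-priority register, so the new accessors give a paravirt-aware way to mask whole interrupt-priority classes. A hedged usage sketch (the guard function is hypothetical):

/* CR8 holds the 4-bit TPR; writing 15 masks all maskable interrupt
 * classes.  Save, raise, restore. */
static void tpr_guard_sketch(void)
{
        unsigned long tpr = read_cr8();

        write_cr8(15);
        /* ... work that must not see lower-priority interrupts ... */
        write_cr8(tpr);
}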
@@ -612,43 +663,56 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 }
 
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
-#define rdmsr(msr,val1,val2) do {              \
+#define rdmsr(msr, val1, val2)                 \
+do {                                           \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
-} while(0)
+} while (0)
 
-#define wrmsr(msr,val1,val2) do {              \
+#define wrmsr(msr, val1, val2)                 \
+do {                                           \
        paravirt_write_msr(msr, val1, val2);    \
-} while(0)
+} while (0)
 
-#define rdmsrl(msr,val) do {                   \
+#define rdmsrl(msr, val)                       \
+do {                                           \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
-} while(0)
+} while (0)
 
-#define wrmsrl(msr,val)                wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
-#define wrmsr_safe(msr,a,b)    paravirt_write_msr(msr, a, b)
+#define wrmsrl(msr, val)       wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
+#define wrmsr_safe(msr, a, b)  paravirt_write_msr(msr, a, b)
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr,a,b) ({                 \
+#define rdmsr_safe(msr, a, b)                  \
+({                                             \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
-       _err; })
+       _err;                                   \
+})
+
+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+       int err;
 
+       *p = paravirt_read_msr(msr, &err);
+       return err;
+}
 
 static inline u64 paravirt_read_tsc(void)
 {
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
 }
 
-#define rdtscl(low) do {                       \
+#define rdtscl(low)                            \
+do {                                           \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
-} while(0)
+} while (0)
 
 #define rdtscll(val) (val = paravirt_read_tsc())
 
@@ -663,11 +727,12 @@ static inline unsigned long long paravirt_read_pmc(int counter)
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
 }
 
-#define rdpmc(counter,low,high) do {           \
+#define rdpmc(counter, low, high)              \
+do {                                           \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
-} while(0)
+} while (0)
 
 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
 {
@@ -746,7 +811,8 @@ static inline void set_iopl_mask(unsigned mask)
 }
 
 /* The paravirtualized I/O functions */
-static inline void slow_down_io(void) {
+static inline void slow_down_io(void)
+{
        pv_cpu_ops.io_delay();
 #ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
@@ -846,28 +912,37 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
 }
 
-static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
 {
-       PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
+       PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
 }
-static inline void paravirt_release_pt(unsigned pfn)
+static inline void paravirt_release_pte(unsigned pfn)
 {
-       PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
+       PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
 }
 
-static inline void paravirt_alloc_pd(unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
 {
-       PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn);
+       PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }
 
-static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
-                                          unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
+                                           unsigned start, unsigned count)
 {
-       PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
+       PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
 }
-static inline void paravirt_release_pd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned pfn)
 {
-       PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
+       PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
+}
+
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+{
+       PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
+}
+static inline void paravirt_release_pud(unsigned pfn)
+{
+       PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
 
 #ifdef CONFIG_HIGHPTE
@@ -891,128 +966,236 @@ static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
 }
 
-#ifdef CONFIG_X86_PAE
-static inline pte_t __pte(unsigned long long val)
+static inline pte_t __pte(pteval_t val)
 {
-       unsigned long long ret = PVOP_CALL2(unsigned long long,
-                                           pv_mmu_ops.make_pte,
-                                           val, val >> 32);
-       return (pte_t) { ret, ret >> 32 };
+       pteval_t ret;
+
+       if (sizeof(pteval_t) > sizeof(long))
+               ret = PVOP_CALL2(pteval_t,
+                                pv_mmu_ops.make_pte,
+                                val, (u64)val >> 32);
+       else
+               ret = PVOP_CALL1(pteval_t,
+                                pv_mmu_ops.make_pte,
+                                val);
+
+       return (pte_t) { .pte = ret };
 }
 
-static inline pmd_t __pmd(unsigned long long val)
+static inline pteval_t pte_val(pte_t pte)
 {
-       return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd,
-                                   val, val >> 32) };
+       pteval_t ret;
+
+       if (sizeof(pteval_t) > sizeof(long))
+               ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
+                                pte.pte, (u64)pte.pte >> 32);
+       else
+               ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
+                                pte.pte);
+
+       return ret;
 }
 
-static inline pgd_t __pgd(unsigned long long val)
+static inline pgd_t __pgd(pgdval_t val)
 {
-       return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd,
-                                   val, val >> 32) };
+       pgdval_t ret;
+
+       if (sizeof(pgdval_t) > sizeof(long))
+               ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
+                                val, (u64)val >> 32);
+       else
+               ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
+                                val);
+
+       return (pgd_t) { ret };
 }
 
-static inline unsigned long long pte_val(pte_t x)
+static inline pgdval_t pgd_val(pgd_t pgd)
 {
-       return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val,
-                         x.pte_low, x.pte_high);
+       pgdval_t ret;
+
+       if (sizeof(pgdval_t) > sizeof(long))
+               ret =  PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
+                                 pgd.pgd, (u64)pgd.pgd >> 32);
+       else
+               ret =  PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
+                                 pgd.pgd);
+
+       return ret;
 }
 
-static inline unsigned long long pmd_val(pmd_t x)
+static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-       return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val,
-                         x.pmd, x.pmd >> 32);
+       if (sizeof(pteval_t) > sizeof(long))
+               PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
+                           pte.pte, (u64)pte.pte >> 32);
+       else
+               PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
+                           pte.pte);
 }
 
-static inline unsigned long long pgd_val(pgd_t x)
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+                             pte_t *ptep, pte_t pte)
 {
-       return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val,
-                         x.pgd, x.pgd >> 32);
+       if (sizeof(pteval_t) > sizeof(long))
+               /* 5 arg words */
+               pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
+       else
+               PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
 }
 
-static inline void set_pte(pte_t *ptep, pte_t pteval)
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-       PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high);
+       pmdval_t val = native_pmd_val(pmd);
+
+       if (sizeof(pmdval_t) > sizeof(long))
+               PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
+       else
+               PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
 }
 
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-                             pte_t *ptep, pte_t pteval)
+#if PAGETABLE_LEVELS >= 3
+static inline pmd_t __pmd(pmdval_t val)
 {
-       /* 5 arg words */
-       pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval);
+       pmdval_t ret;
+
+       if (sizeof(pmdval_t) > sizeof(long))
+               ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
+                                val, (u64)val >> 32);
+       else
+               ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
+                                val);
+
+       return (pmd_t) { ret };
 }
 
-static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
+static inline pmdval_t pmd_val(pmd_t pmd)
 {
-       PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
-                   pteval.pte_low, pteval.pte_high);
+       pmdval_t ret;
+
+       if (sizeof(pmdval_t) > sizeof(long))
+               ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
+                                 pmd.pmd, (u64)pmd.pmd >> 32);
+       else
+               ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
+                                 pmd.pmd);
+
+       return ret;
 }
 
-static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
-                                  pte_t *ptep, pte_t pte)
+static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-       /* 5 arg words */
-       pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
+       pudval_t val = native_pud_val(pud);
+
+       if (sizeof(pudval_t) > sizeof(long))
+               PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
+                           val, (u64)val >> 32);
+       else
+               PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
+                           val);
+}
+#if PAGETABLE_LEVELS == 4
+static inline pud_t __pud(pudval_t val)
+{
+       pudval_t ret;
+
+       if (sizeof(pudval_t) > sizeof(long))
+               ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
+                                val, (u64)val >> 32);
+       else
+               ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
+                                val);
+
+       return (pud_t) { ret };
 }
 
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+static inline pudval_t pud_val(pud_t pud)
 {
-       PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
-                   pmdval.pmd, pmdval.pmd >> 32);
+       pudval_t ret;
+
+       if (sizeof(pudval_t) > sizeof(long))
+               ret =  PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
+                                 pud.pud, (u64)pud.pud >> 32);
+       else
+               ret =  PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
+                                 pud.pud);
+
+       return ret;
 }
 
-static inline void set_pud(pud_t *pudp, pud_t pudval)
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-       PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
-                   pudval.pgd.pgd, pudval.pgd.pgd >> 32);
+       pgdval_t val = native_pgd_val(pgd);
+
+       if (sizeof(pgdval_t) > sizeof(long))
+               PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
+                           val, (u64)val >> 32);
+       else
+               PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
+                           val);
 }
 
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void pgd_clear(pgd_t *pgdp)
 {
-       PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
+       set_pgd(pgdp, __pgd(0));
 }
 
-static inline void pmd_clear(pmd_t *pmdp)
+static inline void pud_clear(pud_t *pudp)
 {
-       PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
+       set_pud(pudp, __pud(0));
 }
 
-#else  /* !CONFIG_X86_PAE */
+#endif /* PAGETABLE_LEVELS == 4 */
 
-static inline pte_t __pte(unsigned long val)
+#endif /* PAGETABLE_LEVELS >= 3 */
+
+#ifdef CONFIG_X86_PAE
+/* Special-case pte-setting operations for PAE, which can't update a
+   64-bit pte atomically */
+static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 {
-       return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) };
+       PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
+                   pte.pte, pte.pte >> 32);
 }
 
-static inline pgd_t __pgd(unsigned long val)
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep, pte_t pte)
 {
-       return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) };
+       /* 5 arg words */
+       pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
 }
 
-static inline unsigned long pte_val(pte_t x)
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+                            pte_t *ptep)
 {
-       return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low);
+       PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
 }
 
-static inline unsigned long pgd_val(pgd_t x)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
+}
+#else  /* !CONFIG_X86_PAE */
+static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 {
-       return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd);
+       set_pte(ptep, pte);
 }
 
-static inline void set_pte(pte_t *ptep, pte_t pteval)
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep, pte_t pte)
 {
-       PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low);
+       set_pte(ptep, pte);
 }
 
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-                             pte_t *ptep, pte_t pteval)
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+                            pte_t *ptep)
 {
-       PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low);
+       set_pte_at(mm, addr, ptep, __pte(0));
 }
 
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+static inline void pmd_clear(pmd_t *pmdp)
 {
-       PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
+       set_pmd(pmdp, __pmd(0));
 }
 #endif /* CONFIG_X86_PAE */
 
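Every sizeof(pteval_t) > sizeof(long) test above is a compile-time constant, so the compiler keeps exactly one branch per configuration: PAE passes the 64-bit value as two register-sized halves, everything else passes a single word. A standalone sketch of the idiom, with hypothetical types and callees:

#include <stdint.h>

typedef uint64_t pteval_t;      /* pretend PAE: wider than a 32-bit long */

extern unsigned long pvop_call2(unsigned long lo, unsigned long hi);
extern unsigned long pvop_call1(unsigned long word);

static unsigned long dispatch(pteval_t val)
{
        /* The condition folds at compile time, so the dead branch and
         * its argument set-up disappear from the generated code. */
        if (sizeof(pteval_t) > sizeof(long))
                return pvop_call2((unsigned long)val, (uint64_t)val >> 32);
        else
                return pvop_call1((unsigned long)val);
}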
@@ -1083,52 +1266,68 @@ struct paravirt_patch_site {
 extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
 
+#ifdef CONFIG_X86_32
+#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
+#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
+#define PV_FLAGS_ARG "0"
+#define PV_EXTRA_CLOBBERS
+#define PV_VEXTRA_CLOBBERS
+#else
+/* We save some registers, but not all; saving all would cost too much. We
+ * clobber every caller-saved register except the argument register. */
+#define PV_SAVE_REGS "pushq %%rdi;"
+#define PV_RESTORE_REGS "popq %%rdi;"
+#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx"
+#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx"
+#define PV_FLAGS_ARG "D"
+#endif
+
 static inline unsigned long __raw_local_save_flags(void)
 {
        unsigned long f;
 
-       asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
+       asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
-                                 "popl %%edx; popl %%ecx")
+                                 PV_RESTORE_REGS)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "cc");
+                    : "memory", "cc" PV_VEXTRA_CLOBBERS);
        return f;
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-       asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
+       asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
-                                 "popl %%edx; popl %%ecx")
+                                 PV_RESTORE_REGS)
                     : "=a"(f)
-                    : "0"(f),
+                    : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "cc");
+                    : "memory", "cc" PV_EXTRA_CLOBBERS);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-       asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
+       asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
-                                 "popl %%edx; popl %%ecx")
+                                 PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "eax", "cc");
+                    : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
 }
 
 static inline void raw_local_irq_enable(void)
 {
-       asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
+       asm volatile(paravirt_alt(PV_SAVE_REGS
                                  PARAVIRT_CALL
-                                 "popl %%edx; popl %%ecx")
+                                 PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "eax", "cc");
+                    : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
 }
 
 static inline unsigned long __raw_local_irq_save(void)
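On i386 the pv-op call may clobber only eax, so ecx and edx are saved explicitly; on x86-64 only the argument register rdi is pushed, and the remaining caller-saved registers are declared as clobbers instead. Stripped of the paravirt_alt()/PARAVIRT_CALL patching machinery, the i386 expansion of __raw_local_save_flags() has roughly this shape (a sketch, not the literal expansion):

static inline unsigned long raw_save_flags_sketch(void)
{
        unsigned long f;

        asm volatile("pushl %%ecx; pushl %%edx;"
                     "call *%1;"                /* stands in for PARAVIRT_CALL */
                     "popl %%edx; popl %%ecx"
                     : "=a" (f)
                     : "m" (pv_irq_ops.save_fl)
                     : "memory", "cc");
        return f;
}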
@@ -1157,44 +1356,72 @@ static inline unsigned long __raw_local_irq_save(void)
 
 #else  /* __ASSEMBLY__ */
 
-#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
-
-#define PARA_SITE(ptype, clobbers, ops)                \
+#define _PVSITE(ptype, clobbers, ops, word, algn)      \
 771:;                                          \
        ops;                                    \
 772:;                                          \
        .pushsection .parainstructions,"a";     \
-        .long 771b;                            \
+        .align algn;                           \
+        word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection
 
+
+#ifdef CONFIG_X86_64
+#define PV_SAVE_REGS   pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx
+#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax
+#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
+#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
+#else
+#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
+#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
+#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
+#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+#endif
+
 #define INTERRUPT_RETURN                                               \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp *%cs:pv_cpu_ops+PV_CPU_iret)
 
 #define DISABLE_INTERRUPTS(clobbers)                                   \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
-                 pushl %eax; pushl %ecx; pushl %edx;                   \
+                 PV_SAVE_REGS;                 \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_disable;              \
-                 popl %edx; popl %ecx; popl %eax)                      \
+                 PV_RESTORE_REGS;)                     \
 
 #define ENABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
-                 pushl %eax; pushl %ecx; pushl %edx;                   \
+                 PV_SAVE_REGS;                 \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_enable;               \
-                 popl %edx; popl %ecx; popl %eax)
+                 PV_RESTORE_REGS;)
 
 #define ENABLE_INTERRUPTS_SYSCALL_RET                                  \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\
                  CLBR_NONE,                                            \
                  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)
 
+
+#ifdef CONFIG_X86_32
 #define GET_CR0_INTO_EAX                       \
        push %ecx; push %edx;                   \
        call *pv_cpu_ops+PV_CPU_read_cr0;       \
        pop %edx; pop %ecx
+#else
+#define SWAPGS                                                         \
+       PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
+                 PV_SAVE_REGS;                                         \
+                 call *pv_cpu_ops+PV_CPU_swapgs;                       \
+                 PV_RESTORE_REGS                                       \
+                )
+
+#define GET_CR2_INTO_RCX                       \
+       call *pv_mmu_ops+PV_MMU_read_cr2;       \
+       movq %rax, %rcx;                        \
+       xorq %rax, %rax;
+
+#endif
 
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_PARAVIRT */