KVM: Convert vm lock to a mutex
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 2079d69f186a13e051b317fa2f65821a9c2f804f..e303b4137bfa763f467dbcd367c1ed9d16a77083 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
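Note on the change itself (this paragraph and the sketch below are commentary, not part of the diff): the series converts the per-VM kvm->lock from a spinlock to a mutex, which is why the mmu_topup_memory_caches() hunk further down can simply mutex_unlock()/mutex_lock() around a sleeping GFP_KERNEL allocation instead of the spin_unlock() plus vcpu_put()/vcpu_load() sequence the spinlock version used. A minimal sketch of that pattern, with hypothetical struct and function names (kvm_old, kvm_new, alloc_under_vm_lock) used purely for illustration:

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical before/after structs, for illustration only. */
struct kvm_old { spinlock_t lock; };	/* old: sleeping is forbidden while held */
struct kvm_new { struct mutex lock; };	/* new: the holder may sleep */

static void *alloc_under_vm_lock(struct kvm_new *kvm)
{
	void *p;

	mutex_lock(&kvm->lock);
	/* Try an atomic allocation first, as the real code does with GFP_NOWAIT. */
	p = kmalloc(64, GFP_NOWAIT);
	if (!p) {
		/* With a mutex, the lock can simply be dropped and re-taken
		 * around a sleeping allocation. */
		mutex_unlock(&kvm->lock);
		p = kmalloc(64, GFP_KERNEL);
		mutex_lock(&kvm->lock);
	}
	mutex_unlock(&kvm->lock);
	return p;
}

The diff follows.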
  * the COPYING file in the top-level directory.
  *
  */
+
+#include "vmx.h"
+#include "kvm.h"
+
 #include <linux/types.h>
 #include <linux/string.h>
-#include <asm/page.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
 
-#include "vmx.h"
-#include "kvm.h"
+#include <asm/page.h>
+#include <asm/cmpxchg.h>
 
 #undef MMU_DEBUG
 
@@ -90,25 +93,11 @@ static int dbg = 1;
 #define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
 
 
-#define PT32_PTE_COPY_MASK \
-       (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
-
-#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
-
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
 
-#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
 #define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
 
-#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
-#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)
-
-#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
-#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))
-
-#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
-
 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
 
 #define PT64_LEVEL_BITS 9
@@ -165,12 +154,11 @@ struct kvm_rmap_desc {
 
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
-static struct kmem_cache *mmu_page_cache;
 static struct kmem_cache *mmu_page_header_cache;
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-       return vcpu->cr0 & CR0_WP_MASK;
+       return vcpu->cr0 & X86_CR0_WP;
 }
 
 static int is_cpuid_PSE36(void)
@@ -204,6 +192,15 @@ static int is_rmap_pte(u64 pte)
                == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
 }
 
+static void set_shadow_pte(u64 *sptep, u64 spte)
+{
+#ifdef CONFIG_X86_64
+       set_64bit((unsigned long *)sptep, spte);
+#else
+       set_64bit((unsigned long long *)sptep, spte);
+#endif
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min,
                                  gfp_t gfp_flags)
@@ -227,6 +224,29 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
                kfree(mc->objects[--mc->nobjs]);
 }
 
+static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
+                                      int min, gfp_t gfp_flags)
+{
+       struct page *page;
+
+       if (cache->nobjs >= min)
+               return 0;
+       while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
+               page = alloc_page(gfp_flags);
+               if (!page)
+                       return -ENOMEM;
+               set_page_private(page, 0);
+               cache->objects[cache->nobjs++] = page_address(page);
+       }
+       return 0;
+}
+
+static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
+{
+       while (mc->nobjs)
+               free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
 static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
 {
        int r;
@@ -239,8 +259,7 @@ static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
                                   rmap_desc_cache, 1, gfp_flags);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
-                                  mmu_page_cache, 4, gfp_flags);
+       r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
@@ -254,12 +273,11 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
        int r;
 
        r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
+       kvm_mmu_free_some_pages(vcpu);
        if (r < 0) {
-               spin_unlock(&vcpu->kvm->lock);
-               kvm_arch_ops->vcpu_put(vcpu);
+               mutex_unlock(&vcpu->kvm->lock);
                r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
-               kvm_arch_ops->vcpu_load(vcpu);
-               spin_lock(&vcpu->kvm->lock);
+               mutex_lock(&vcpu->kvm->lock);
        }
        return r;
 }
@@ -268,7 +286,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
        mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
        mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-       mmu_free_memory_cache(&vcpu->mmu_page_cache);
+       mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
        mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
 }
 
@@ -283,24 +301,15 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
        return p;
 }
 
-static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
-{
-       if (mc->nobjs < KVM_NR_MEM_OBJS)
-               mc->objects[mc->nobjs++] = obj;
-       else
-               kfree(obj);
-}
-
 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 {
        return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
 }
 
-static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
-                              struct kvm_pte_chain *pc)
+static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 {
-       mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
+       kfree(pc);
 }
 
 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
@@ -309,10 +318,9 @@ static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
                                      sizeof(struct kvm_rmap_desc));
 }
 
-static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
-                              struct kvm_rmap_desc *rd)
+static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
 {
-       mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
+       kfree(rd);
 }
 
 /*
@@ -357,8 +365,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
        }
 }
 
-static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
-                                  struct page *page,
+static void rmap_desc_remove_entry(struct page *page,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
@@ -378,10 +385,10 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
                        prev_desc->more = desc->more;
                else
                        set_page_private(page,(unsigned long)desc->more | 1);
-       mmu_free_rmap_desc(vcpu, desc);
+       mmu_free_rmap_desc(desc);
 }
 
-static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
+static void rmap_remove(u64 *spte)
 {
        struct page *page;
        struct kvm_rmap_desc *desc;
@@ -409,7 +416,7 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                                if (desc->shadow_ptes[i] == spte) {
-                                       rmap_desc_remove_entry(vcpu, page,
+                                       rmap_desc_remove_entry(page,
                                                               desc, i,
                                                               prev_desc);
                                        return;
@@ -444,9 +451,9 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                BUG_ON(!(*spte & PT_WRITABLE_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-               rmap_remove(vcpu, spte);
-               kvm_arch_ops->tlb_flush(vcpu);
-               *spte &= ~(u64)PT_WRITABLE_MASK;
+               rmap_remove(spte);
+               set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+               kvm_flush_remote_tlbs(vcpu->kvm);
        }
 }
 
@@ -466,14 +473,14 @@ static int is_empty_shadow_page(u64 *spt)
 }
 #endif
 
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
+static void kvm_mmu_free_page(struct kvm *kvm,
                              struct kvm_mmu_page *page_head)
 {
        ASSERT(is_empty_shadow_page(page_head->spt));
        list_del(&page_head->link);
-       mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
-       mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
-       ++vcpu->kvm->n_free_mmu_pages;
+       __free_page(virt_to_page(page_head->spt));
+       kfree(page_head);
+       ++kvm->n_free_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -539,8 +546,7 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
        pte_chain->parent_ptes[0] = parent_pte;
 }
 
-static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
-                                      struct kvm_mmu_page *page,
+static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
                                       u64 *parent_pte)
 {
        struct kvm_pte_chain *pte_chain;
@@ -567,7 +573,7 @@ static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
                        pte_chain->parent_ptes[i] = NULL;
                        if (i == 0) {
                                hlist_del(&pte_chain->link);
-                               mmu_free_pte_chain(vcpu, pte_chain);
+                               mmu_free_pte_chain(pte_chain);
                                if (hlist_empty(&page->parent_ptes)) {
                                        page->multimapped = 0;
                                        page->parent_pte = NULL;
@@ -645,7 +651,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        return page;
 }
 
-static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
+static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                                         struct kvm_mmu_page *page)
 {
        unsigned i;
@@ -657,10 +663,10 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
        if (page->role.level == PT_PAGE_TABLE_LEVEL) {
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                        if (pt[i] & PT_PRESENT_MASK)
-                               rmap_remove(vcpu, &pt[i]);
+                               rmap_remove(&pt[i]);
                        pt[i] = 0;
                }
-               kvm_arch_ops->tlb_flush(vcpu);
+               kvm_flush_remote_tlbs(kvm);
                return;
        }
 
@@ -671,18 +677,18 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
                if (!(ent & PT_PRESENT_MASK))
                        continue;
                ent &= PT64_BASE_ADDR_MASK;
-               mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
+               mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
        }
+       kvm_flush_remote_tlbs(kvm);
 }
 
-static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
-                            struct kvm_mmu_page *page,
+static void kvm_mmu_put_page(struct kvm_mmu_page *page,
                             u64 *parent_pte)
 {
-       mmu_page_remove_parent_pte(vcpu, page, parent_pte);
+       mmu_page_remove_parent_pte(page, parent_pte);
 }
 
-static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
+static void kvm_mmu_zap_page(struct kvm *kvm,
                             struct kvm_mmu_page *page)
 {
        u64 *parent_pte;
@@ -698,15 +704,15 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
                        parent_pte = chain->parent_ptes[0];
                }
                BUG_ON(!parent_pte);
-               kvm_mmu_put_page(vcpu, page, parent_pte);
-               *parent_pte = 0;
+               kvm_mmu_put_page(page, parent_pte);
+               set_shadow_pte(parent_pte, 0);
        }
-       kvm_mmu_page_unlink_children(vcpu, page);
+       kvm_mmu_page_unlink_children(kvm, page);
        if (!page->root_count) {
                hlist_del(&page->hash_link);
-               kvm_mmu_free_page(vcpu, page);
+               kvm_mmu_free_page(kvm, page);
        } else
-               list_move(&page->link, &vcpu->kvm->active_mmu_pages);
+               list_move(&page->link, &kvm->active_mmu_pages);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -725,12 +731,23 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
                                 page->role.word);
-                       kvm_mmu_zap_page(vcpu, page);
+                       kvm_mmu_zap_page(vcpu->kvm, page);
                        r = 1;
                }
        return r;
 }
 
+static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       struct kvm_mmu_page *page;
+
+       while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+               pgprintk("%s: zap %lx %x\n",
+                        __FUNCTION__, gfn, page->role.word);
+               kvm_mmu_zap_page(vcpu->kvm, page);
+       }
+}
+
 static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
 {
        int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
@@ -831,11 +848,12 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
        int i;
        struct kvm_mmu_page *page;
 
+       if (!VALID_PAGE(vcpu->mmu.root_hpa))
+               return;
 #ifdef CONFIG_X86_64
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;
 
-               ASSERT(VALID_PAGE(root));
                page = page_header(root);
                --page->root_count;
                vcpu->mmu.root_hpa = INVALID_PAGE;
@@ -846,7 +864,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
                hpa_t root = vcpu->mmu.pae_root[i];
 
                if (root) {
-                       ASSERT(VALID_PAGE(root));
                        root &= PT64_BASE_ADDR_MASK;
                        page = page_header(root);
                        --page->root_count;
@@ -942,9 +959,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
        context->free = nonpaging_free;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
-       mmu_alloc_roots(vcpu);
-       ASSERT(VALID_PAGE(context->root_hpa));
-       kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
+       context->root_hpa = INVALID_PAGE;
        return 0;
 }
 
@@ -958,11 +973,6 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
        pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
        mmu_free_roots(vcpu);
-       if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
-               kvm_mmu_free_some_pages(vcpu);
-       mmu_alloc_roots(vcpu);
-       kvm_mmu_flush_tlb(vcpu);
-       kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
 }
 
 static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -996,10 +1006,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
-       mmu_alloc_roots(vcpu);
-       ASSERT(VALID_PAGE(context->root_hpa));
-       kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
-                   (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+       context->root_hpa = INVALID_PAGE;
        return 0;
 }
 
@@ -1018,10 +1025,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
        context->free = paging_free;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
-       mmu_alloc_roots(vcpu);
-       ASSERT(VALID_PAGE(context->root_hpa));
-       kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
-                   (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+       context->root_hpa = INVALID_PAGE;
        return 0;
 }
 
@@ -1035,7 +1039,6 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
 
-       mmu_topup_memory_caches(vcpu);
        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
        else if (is_long_mode(vcpu))
@@ -1056,17 +1059,32 @@ static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 }
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
+{
+       destroy_kvm_mmu(vcpu);
+       return init_kvm_mmu(vcpu);
+}
+
+int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
        int r;
 
-       destroy_kvm_mmu(vcpu);
-       r = init_kvm_mmu(vcpu);
-       if (r < 0)
-               goto out;
+       mutex_lock(&vcpu->kvm->lock);
        r = mmu_topup_memory_caches(vcpu);
+       if (r)
+               goto out;
+       mmu_alloc_roots(vcpu);
+       kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+       kvm_mmu_flush_tlb(vcpu);
 out:
+       mutex_unlock(&vcpu->kvm->lock);
        return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_load);
+
+void kvm_mmu_unload(struct kvm_vcpu *vcpu)
+{
+       mmu_free_roots(vcpu);
+}
 
 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *page,
@@ -1078,13 +1096,14 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
        pte = *spte;
        if (is_present_pte(pte)) {
                if (page->role.level == PT_PAGE_TABLE_LEVEL)
-                       rmap_remove(vcpu, spte);
+                       rmap_remove(spte);
                else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
-                       mmu_page_remove_parent_pte(vcpu, child, spte);
+                       mmu_page_remove_parent_pte(child, spte);
                }
        }
        *spte = 0;
+       kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
@@ -1102,7 +1121,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                      const u8 *old, const u8 *new, int bytes)
+                      const u8 *new, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *page;
@@ -1149,7 +1168,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, page->role.word);
-                       kvm_mmu_zap_page(vcpu, page);
+                       kvm_mmu_zap_page(vcpu->kvm, page);
                        continue;
                }
                page_offset = offset;
@@ -1188,17 +1207,16 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
        return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
 }
 
-void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
        while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
                struct kvm_mmu_page *page;
 
                page = container_of(vcpu->kvm->active_mmu_pages.prev,
                                    struct kvm_mmu_page, link);
-               kvm_mmu_zap_page(vcpu, page);
+               kvm_mmu_zap_page(vcpu->kvm, page);
        }
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
 
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
@@ -1207,7 +1225,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
        while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
                page = container_of(vcpu->kvm->active_mmu_pages.next,
                                    struct kvm_mmu_page, link);
-               kvm_mmu_zap_page(vcpu, page);
+               kvm_mmu_zap_page(vcpu->kvm, page);
        }
        free_page((unsigned long)vcpu->mmu.pae_root);
 }
@@ -1265,9 +1283,8 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
        mmu_free_memory_caches(vcpu);
 }
 
-void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
-       struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_page *page;
 
        list_for_each_entry(page, &kvm->active_mmu_pages, link) {
@@ -1281,27 +1298,20 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (pt[i] & PT_WRITABLE_MASK) {
-                               rmap_remove(vcpu, &pt[i]);
+                               rmap_remove(&pt[i]);
                                pt[i] &= ~PT_WRITABLE_MASK;
                        }
        }
 }
 
-void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
+void kvm_mmu_zap_all(struct kvm *kvm)
 {
-       destroy_kvm_mmu(vcpu);
+       struct kvm_mmu_page *page, *node;
 
-       while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-               struct kvm_mmu_page *page;
-
-               page = container_of(vcpu->kvm->active_mmu_pages.next,
-                                   struct kvm_mmu_page, link);
-               kvm_mmu_zap_page(vcpu, page);
-       }
+       list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
+               kvm_mmu_zap_page(kvm, page);
 
-       mmu_free_memory_caches(vcpu);
-       kvm_arch_ops->tlb_flush(vcpu);
-       init_kvm_mmu(vcpu);
+       kvm_flush_remote_tlbs(kvm);
 }
 
 void kvm_mmu_module_exit(void)
@@ -1310,8 +1320,6 @@ void kvm_mmu_module_exit(void)
                kmem_cache_destroy(pte_chain_cache);
        if (rmap_desc_cache)
                kmem_cache_destroy(rmap_desc_cache);
-       if (mmu_page_cache)
-               kmem_cache_destroy(mmu_page_cache);
        if (mmu_page_header_cache)
                kmem_cache_destroy(mmu_page_header_cache);
 }
@@ -1320,24 +1328,18 @@ int kvm_mmu_module_init(void)
 {
        pte_chain_cache = kmem_cache_create("kvm_pte_chain",
                                            sizeof(struct kvm_pte_chain),
-                                           0, 0, NULL, NULL);
+                                           0, 0, NULL);
        if (!pte_chain_cache)
                goto nomem;
        rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
                                            sizeof(struct kvm_rmap_desc),
-                                           0, 0, NULL, NULL);
+                                           0, 0, NULL);
        if (!rmap_desc_cache)
                goto nomem;
 
-       mmu_page_cache = kmem_cache_create("kvm_mmu_page",
-                                          PAGE_SIZE,
-                                          PAGE_SIZE, 0, NULL, NULL);
-       if (!mmu_page_cache)
-               goto nomem;
-
        mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
                                                  sizeof(struct kvm_mmu_page),
-                                                 0, 0, NULL, NULL);
+                                                 0, 0, NULL);
        if (!mmu_page_header_cache)
                goto nomem;