diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c28a36b4cbba0fc4be18ace782b9f98ce2509a38..7e7c3969f7a2d01f0f9d2d2542b046fc9d0aa157 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -376,7 +376,6 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 
        write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
        *write_count += 1;
-       WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -641,6 +640,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
                        rmap_remove(kvm, spte);
                        --kvm->stat.lpages;
                        set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+                       spte = NULL;
                        write_protected = 1;
                }
                spte = rmap_next(kvm, rmapp, spte);
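
The added "spte = NULL" restarts the rmap walk from the head of the chain once the current entry has been removed and cleared; asking rmap_next() to continue from an entry that was just unlinked would follow stale state. Below is a minimal stand-alone sketch of the same restart-after-removal pattern, using a made-up singly linked list in place of KVM's rmap chain (all names are invented for the illustration):

/* Illustration only: a toy list stands in for the rmap chain.
 * next_entry() mimics rmap_next(): passing NULL asks for the first
 * entry, otherwise it returns the entry after cur. */
#include <stdio.h>
#include <stdlib.h>

struct entry {
        int val;
        struct entry *next;
};

static struct entry *next_entry(struct entry *head, struct entry *cur)
{
        return cur ? cur->next : head;
}

static void remove_entry(struct entry **head, struct entry *victim)
{
        struct entry **p;

        for (p = head; *p; p = &(*p)->next)
                if (*p == victim) {
                        *p = victim->next;
                        free(victim);
                        return;
                }
}

int main(void)
{
        struct entry *head = NULL, *cur;
        int i;

        for (i = 0; i < 5; i++) {
                struct entry *e = malloc(sizeof(*e));
                e->val = i;
                e->next = head;
                head = e;
        }

        /* drop every even-valued entry, restarting after each removal */
        cur = next_entry(head, NULL);
        while (cur) {
                if (cur->val % 2 == 0) {
                        remove_entry(&head, cur);
                        cur = NULL;     /* restart from the head, as the patch does */
                }
                cur = next_entry(head, cur);
        }

        for (cur = head; cur; cur = cur->next)
                printf("%d ", cur->val);
        printf("\n");
        return 0;
}
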
@@ -659,7 +659,7 @@ static int is_empty_shadow_page(u64 *spt)
        u64 *end;
 
        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-               if (*pos != shadow_trap_nonpresent_pte) {
+               if (is_shadow_present_pte(*pos)) {
                        printk(KERN_ERR "%s: %p %llx\n", __func__,
                               pos, *pos);
                        return 0;
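
The emptiness scan now asks is_shadow_present_pte() instead of comparing each entry against the single shadow_trap_nonpresent_pte value, so any not-present encoding counts as empty rather than only the trap value. A small stand-alone sketch of why a presence predicate is more robust than a one-sentinel comparison; both sentinel values below are invented for the example and are not the real KVM encodings:

/* Illustration only: an entry is "present" unless it holds one of two
 * hypothetical not-present encodings.  Comparing against a single
 * sentinel misses the second one. */
#include <stdio.h>
#include <stdint.h>

static const uint64_t trap_nonpresent   = 0x0;  /* made-up value */
static const uint64_t notrap_nonpresent = 0x3;  /* made-up value */

static int entry_present(uint64_t e)
{
        return e != trap_nonpresent && e != notrap_nonpresent;
}

int main(void)
{
        uint64_t table[4] = { 0x0, 0x3, 0x0, 0x0 };
        int i, empty_by_sentinel = 1, empty_by_predicate = 1;

        for (i = 0; i < 4; i++) {
                if (table[i] != trap_nonpresent)   /* old-style single-sentinel check */
                        empty_by_sentinel = 0;
                if (entry_present(table[i]))       /* predicate-style check */
                        empty_by_predicate = 0;
        }

        printf("sentinel says empty: %d, predicate says empty: %d\n",
               empty_by_sentinel, empty_by_predicate);
        return 0;
}
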
@@ -1083,10 +1083,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                struct kvm_mmu_page *shadow;
 
                spte |= PT_WRITABLE_MASK;
-               if (user_fault) {
-                       mmu_unshadow(vcpu->kvm, gfn);
-                       goto unshadowed;
-               }
 
                shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
                if (shadow ||
@@ -1103,8 +1099,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                }
        }
 
-unshadowed:
-
        if (pte_access & ACC_WRITE_MASK)
                mark_page_dirty(vcpu->kvm, gfn);
 
@@ -1177,8 +1171,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                                return -ENOMEM;
                        }
 
-                       table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
-                               | PT_WRITABLE_MASK | shadow_user_mask;
+                       table[index] = __pa(new_table->spt)
+                               | PT_PRESENT_MASK | PT_WRITABLE_MASK
+                               | shadow_user_mask | shadow_x_mask;
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
        }
@@ -1233,7 +1228,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
        spin_lock(&vcpu->kvm->mmu_lock);
-#ifdef CONFIG_X86_64
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
 
@@ -1245,7 +1239,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
                spin_unlock(&vcpu->kvm->mmu_lock);
                return;
        }
-#endif
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -1271,7 +1264,6 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
        root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
-#ifdef CONFIG_X86_64
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
 
@@ -1286,7 +1278,6 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                vcpu->arch.mmu.root_hpa = root;
                return;
        }
-#endif
        metaphysical = !is_paging(vcpu);
        if (tdp_enabled)
                metaphysical = 1;
@@ -1584,11 +1575,13 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  u64 *spte,
                                  const void *new)
 {
-       if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
-           && !vcpu->arch.update_pte.largepage) {
-               ++vcpu->kvm->stat.mmu_pde_zapped;
-               return;
-       }
+       if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+               if (!vcpu->arch.update_pte.largepage ||
+                   sp->role.glevels == PT32_ROOT_LEVEL) {
+                       ++vcpu->kvm->stat.mmu_pde_zapped;
+                       return;
+               }
+       }
 
        ++vcpu->kvm->stat.mmu_pte_updated;
        if (sp->role.glevels == PT32_ROOT_LEVEL)
@@ -1862,6 +1855,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
                sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
                                  struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, sp);
+               cond_resched();
        }
        free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
@@ -2000,7 +1994,7 @@ static struct shrinker mmu_shrinker = {
        .seeks = DEFAULT_SEEKS * 10,
 };
 
-void mmu_destroy_caches(void)
+static void mmu_destroy_caches(void)
 {
        if (pte_chain_cache)
                kmem_cache_destroy(pte_chain_cache);