Merge branch 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5ebb2788bd73cad5246d6993f2ce4d3fea6fc9ae..2fa231923cf7e90d9e969478f22207ba0040cd57 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -66,7 +66,8 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 #endif
 
 #if defined(MMU_DEBUG) || defined(AUDIT)
-static int dbg = 1;
+static int dbg = 0;
+module_param(dbg, bool, 0644);
 #endif
 
 #ifndef MMU_DEBUG
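
The hunk above stops hard-wiring the MMU debug output on: dbg now defaults to 0 and is exposed as a module parameter, so it can be toggled at runtime through sysfs instead of by rebuilding. A minimal sketch of the pattern as a hypothetical standalone module (note that recent kernels require the variable itself to be bool when the "bool" parameter type is used):

    #include <linux/module.h>

    static bool dbg;
    /* 0644: root-writable, world-readable under
     * /sys/module/<name>/parameters/dbg */
    module_param(dbg, bool, 0644);
    MODULE_PARM_DESC(dbg, "enable debug output");
    MODULE_LICENSE("GPL");

Since mmu.c is built into kvm.ko, the parameter added by this patch should surface as /sys/module/kvm/parameters/dbg, so debug output can be enabled on a running system with a simple echo.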
@@ -929,14 +930,17 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        }
        kvm_mmu_page_unlink_children(kvm, sp);
        if (!sp->root_count) {
-               if (!sp->role.metaphysical)
+               if (!sp->role.metaphysical && !sp->role.invalid)
                        unaccount_shadowed(kvm, sp->gfn);
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
        } else {
+               int invalid = sp->role.invalid;
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
                sp->role.invalid = 1;
                kvm_reload_remote_mmus(kvm);
+               if (!sp->role.metaphysical && !invalid)
+                       unaccount_shadowed(kvm, sp->gfn);
        }
        kvm_mmu_reset_last_pte_updated(kvm);
 }
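
This hunk moves the unaccount_shadowed() call to the point where a shadow page is first invalidated rather than where it is finally freed. A page still pinned by root_count is only marked invalid and will pass through kvm_mmu_zap_page() again later; the saved invalid flag guarantees the gfn is unaccounted exactly once across both passes. Reduced to an illustrative sketch (not kvm code, names invented):

    struct sp_sketch { bool metaphysical, invalid; };

    static void zap_sketch(struct sp_sketch *sp, int root_count)
    {
            if (!root_count) {
                    /* final pass: skip pages already unaccounted
                     * when they were marked invalid */
                    if (!sp->metaphysical && !sp->invalid)
                            ; /* unaccount_shadowed() */
                    /* ... free the page ... */
            } else {
                    bool invalid = sp->invalid;
                    sp->invalid = true;     /* freed on a later pass */
                    if (!sp->metaphysical && !invalid)
                            ; /* unaccount_shadowed() */
            }
    }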
@@ -1115,7 +1119,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                mark_page_dirty(vcpu->kvm, gfn);
 
        pgprintk("%s: setting spte %llx\n", __func__, spte);
-       pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
+       pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
                 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
                 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
        set_shadow_pte(shadow_pte, spte);
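
The pgprintk change is a format-string fix: the switch to %ld shows gfn is long-sized here, so %d would truncate it, and shadow_pte is a u64 *, so printing it through %llx passes a pointer where a 64-bit integer is expected; %p is the correct specifier. A userspace illustration (hypothetical; building with -Wformat makes gcc flag the commented-out variant):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned long gfn = 0x1234;
            uint64_t spte = 0xdeadbeefULL;
            uint64_t *shadow_pte = &spte;

            /* wrong: %d truncates the long, %llx misreads the pointer */
            /* printf("at %d (%llx) addr %llx\n", gfn, spte, shadow_pte); */

            /* right: %ld for the long, %p for the pointer */
            printf("at %ld (%llx) addr %p\n", gfn,
                   (unsigned long long)spte, (void *)shadow_pte);
            return 0;
    }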
@@ -1185,9 +1189,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                                return -ENOMEM;
                        }
 
-                       table[index] = __pa(new_table->spt)
-                               | PT_PRESENT_MASK | PT_WRITABLE_MASK
-                               | shadow_user_mask | shadow_x_mask;
+                       set_shadow_pte(&table[index],
+                                      __pa(new_table->spt)
+                                      | PT_PRESENT_MASK | PT_WRITABLE_MASK
+                                      | shadow_user_mask | shadow_x_mask);
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
        }
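
Here the intermediate page-table entry is installed through set_shadow_pte() instead of a plain assignment. The point is atomicity of the 64-bit store: on a 32-bit host a bare u64 assignment compiles to two 32-bit writes, and a concurrent hardware page walk could observe a half-written entry. A sketch of what such a helper looks like (the body of set_shadow_pte() is not part of this diff; the assumption is that set_64bit() performs a single atomic 8-byte store):

    static void set_shadow_pte(u64 *sptep, u64 spte)
    {
    #ifdef CONFIG_X86_64
            set_64bit((unsigned long *)sptep, spte);
    #else
            set_64bit((unsigned long long *)sptep, spte);
    #endif
    }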
@@ -1809,6 +1814,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
        spin_unlock(&vcpu->kvm->mmu_lock);
        return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
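
kvm_mmu_unprotect_page_virt() is now exported to GPL modules, so the vendor modules can call it directly. A hypothetical caller (illustrative only; the real call site is not in this diff):

    #include <linux/module.h>
    #include <linux/kvm_host.h>

    /* e.g. from an emulation-failure path in a vendor module
     * such as kvm-intel or kvm-amd */
    static int retry_after_unprotect(struct kvm_vcpu *vcpu, gva_t gva)
    {
            return kvm_mmu_unprotect_page_virt(vcpu, gva);
    }
    MODULE_LICENSE("GPL");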
@@ -1865,6 +1871,12 @@ void kvm_enable_tdp(void)
 }
 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
 
+void kvm_disable_tdp(void)
+{
+       tdp_enabled = false;
+}
+EXPORT_SYMBOL_GPL(kvm_disable_tdp);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu_page *sp;
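
kvm_disable_tdp() is the counterpart to the existing kvm_enable_tdp(): a backend can now explicitly fall back to shadow paging instead of relying on tdp_enabled's initial value. A hedged sketch of a caller in a vendor module's hardware setup (names assumed, not from this diff):

    /* Hypothetical vendor-module setup: choose TDP only when the
     * hardware capability (e.g. NPT/EPT) is actually usable. */
    static bool npt_usable;     /* assumed capability flag */

    static void hardware_setup_sketch(void)
    {
            if (npt_usable)
                    kvm_enable_tdp();
            else
                    kvm_disable_tdp();  /* fall back to shadow paging */
    }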
@@ -1986,6 +1998,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
        list_for_each_entry(kvm, &vm_list, vm_list) {
                int npages;
 
+               if (!down_read_trylock(&kvm->slots_lock))
+                       continue;
                spin_lock(&kvm->mmu_lock);
                npages = kvm->arch.n_alloc_mmu_pages -
                         kvm->arch.n_free_mmu_pages;
@@ -1998,6 +2012,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
                nr_to_scan--;
 
                spin_unlock(&kvm->mmu_lock);
+               up_read(&kvm->slots_lock);
        }
        if (kvm_freed)
                list_move_tail(&kvm_freed->vm_list, &vm_list);
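
The last two hunks bracket the shrinker's per-VM work with down_read_trylock(&kvm->slots_lock). mmu_shrink() runs from memory reclaim, so sleeping on slots_lock there is dangerous: the lock can be held across memory allocations, and reclaim would then wait on an owner that is itself waiting for reclaim. The trylock simply skips a contended VM. The pattern in isolation (illustrative only; down_read_trylock() returns nonzero on success):

    #include <linux/rwsem.h>
    #include <linux/spinlock.h>

    static int scan_one_vm(struct rw_semaphore *slots_lock,
                           spinlock_t *mmu_lock)
    {
            if (!down_read_trylock(slots_lock))
                    return 0;               /* contended: skip this VM */
            spin_lock(mmu_lock);
            /* ... zap some shadow pages ... */
            spin_unlock(mmu_lock);
            up_read(slots_lock);
            return 1;
    }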