KVM: MMU: Move nonpaging_prefetch_page()
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index baa6503894d3fe902a57fdd73341740cb79b44d4..62741b7c4223adf2deb0fc8deb3fa0939f0f8cc2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -776,6 +776,15 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
        BUG();
 }
 
+static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
+                                   struct kvm_mmu_page *sp)
+{
+       int i;
+
+       for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+               sp->spt[i] = shadow_trap_nonpresent_pte;
+}
+
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
        unsigned index;
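
The hunk above moves nonpaging_prefetch_page() up in the file unchanged,
presumably so it is visible to earlier functions without a forward
declaration (the old copy is deleted further down). As an illustration of
what the loop does, here is a minimal userspace sketch: every slot of a
freshly allocated shadow page is stamped with a "trap" value, so any guest
access through the page faults back into KVM. PT64_ENT_PER_PAGE and the
trap constant below are stand-ins, not the kernel's definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's values (assumptions, not the real ones). */
    #define PT64_ENT_PER_PAGE 512   /* 4096-byte page / 8-byte entries */
    static const uint64_t trap_nonpresent_pte = 0x00000000fffffff0ull;

    /* Mirrors the loop in nonpaging_prefetch_page(): mark every slot
     * not-present-but-trapped so a guest touch re-enters the fault path. */
    static void prefetch_page(uint64_t *spt)
    {
            for (int i = 0; i < PT64_ENT_PER_PAGE; ++i)
                    spt[i] = trap_nonpresent_pte;
    }

    int main(void)
    {
            uint64_t spt[PT64_ENT_PER_PAGE];
            prefetch_page(spt);
            printf("spt[0] = %#llx\n", (unsigned long long)spt[0]);
            return 0;
    }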
@@ -1083,10 +1092,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                struct kvm_mmu_page *shadow;
 
                spte |= PT_WRITABLE_MASK;
-               if (user_fault) {
-                       mmu_unshadow(vcpu->kvm, gfn);
-                       goto unshadowed;
-               }
 
                shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
                if (shadow ||
@@ -1103,8 +1108,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                }
        }
 
-unshadowed:
-
        if (pte_access & ACC_WRITE_MASK)
                mark_page_dirty(vcpu->kvm, gfn);
 
@@ -1128,8 +1131,10 @@ unshadowed:
                else
                        kvm_release_pfn_clean(pfn);
        }
-       if (!ptwrite || !*ptwrite)
+       if (speculative) {
                vcpu->arch.last_pte_updated = shadow_pte;
+               vcpu->arch.last_pte_gfn = gfn;
+       }
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
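
This hunk changes when the last-updated spte is cached: instead of "no
write fault was signalled", the cache is now filled only for speculative
updates, and the guest frame number is recorded alongside the pointer so
the entry can be matched later by kvm_mmu_access_page() below. A rough
userspace model of the new bookkeeping, with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical model: only sptes installed speculatively (e.g. by
     * pte-write prefetch) are cached, together with the gfn they map. */
    struct vcpu_model {
            uint64_t *last_pte_updated;   /* vcpu->arch.last_pte_updated */
            uint64_t  last_pte_gfn;       /* vcpu->arch.last_pte_gfn */
    };

    static void set_spte_model(struct vcpu_model *v, uint64_t *spte,
                               uint64_t gfn, int speculative)
    {
            /* ... spte construction elided ... */
            if (speculative) {            /* was: if (!ptwrite || !*ptwrite) */
                    v->last_pte_updated = spte;
                    v->last_pte_gfn = gfn;
            }
    }

    int main(void)
    {
            struct vcpu_model v = { NULL, 0 };
            uint64_t spte = 0;
            set_spte_model(&v, &spte, 42, 1);
            printf("cached gfn %llu\n", (unsigned long long)v.last_pte_gfn);
            return 0;
    }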
@@ -1217,15 +1222,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 }
 
 
-static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
-                                   struct kvm_mmu_page *sp)
-{
-       int i;
-
-       for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
-               sp->spt[i] = shadow_trap_nonpresent_pte;
-}
-
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
        int i;
@@ -1677,6 +1673,18 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        vcpu->arch.update_pte.pfn = pfn;
 }
 
+static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       u64 *spte = vcpu->arch.last_pte_updated;
+
+       if (spte
+           && vcpu->arch.last_pte_gfn == gfn
+           && shadow_accessed_mask
+           && !(*spte & shadow_accessed_mask)
+           && is_shadow_present_pte(*spte))
+               set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes)
 {
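
The new kvm_mmu_access_page() helper pre-sets the accessed bit on the
cached spte when the guest writes a pte in the same frame, presumably so
the write-flood detector (which samples the accessed bit of
last_pte_updated) does not mistake emulated page-table writes for an
unused mapping; the shadow_accessed_mask test lets it bail out when the
hardware provides no accessed bit. A compilable userspace model follows,
with stand-in constants (bit 5 is the x86 accessed bit) and the cached
state passed as plain arguments; note the kernel uses an atomic set_bit()
where this sketch uses a plain OR.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-ins (assumptions): bit 0 = present, bit 5 = accessed, matching
     * the x86 PTE layout; a zero mask would mean no hardware accessed bit. */
    #define PT_ACCESSED_SHIFT 5
    static const uint64_t shadow_accessed_mask = 1ull << PT_ACCESSED_SHIFT;

    static bool is_shadow_present(uint64_t spte) { return spte & 1ull; }

    /* Model of kvm_mmu_access_page(): if the spte cached at set_spte time
     * still maps the gfn now being written, pre-set its accessed bit. */
    static void access_cached_spte(uint64_t *spte, uint64_t cached_gfn,
                                   uint64_t gfn)
    {
            if (spte && cached_gfn == gfn
                && shadow_accessed_mask
                && !(*spte & shadow_accessed_mask)
                && is_shadow_present(*spte))
                    *spte |= shadow_accessed_mask;
    }

    int main(void)
    {
            uint64_t spte = 1ull;   /* present, not yet accessed */
            access_cached_spte(&spte, 42, 42);
            printf("accessed: %d\n", !!(spte & shadow_accessed_mask));
            return 0;
    }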
@@ -1700,6 +1708,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
        mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
        spin_lock(&vcpu->kvm->mmu_lock);
+       kvm_mmu_access_page(vcpu, gfn);
        kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, "pre pte write");
@@ -1954,7 +1963,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
        kvm_flush_remote_tlbs(kvm);
 }
 
-void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
 {
        struct kvm_mmu_page *page;