mmu_page_remove_parent_pte(page, parent_pte);
}
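+/*
+ * Clear each vcpu's cached pointer to the last instantiated shadow pte.
+ * Called when a shadow page is zapped, since the cached pointer may
+ * point into the page being freed.
+ */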
+static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
+{
+ int i;
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i)
+ if (kvm->vcpus[i])
+ kvm->vcpus[i]->last_pte_updated = NULL;
+}
+
static void kvm_mmu_zap_page(struct kvm *kvm,
			     struct kvm_mmu_page *page)
{
	...
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(kvm, page);
	} else
		list_move(&page->link, &kvm->active_mmu_pages);
+	kvm_mmu_reset_last_pte_updated(kvm);
}
static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)

	...
				    offset_in_pte);
}
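+/*
+ * Report whether the guest has used the shadow pte we instantiated most
+ * recently, i.e. whether the hardware has set its accessed bit.
+ */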
+static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
+{
+ u64 *spte = vcpu->last_pte_updated;
+
+ return !!(spte && (*spte & PT_ACCESSED_MASK));
+}
+
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes)
{
	...
	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
kvm_mmu_audit(vcpu, "pre pte write");
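+	/*
+	 * Flooding detection: if the guest keeps rewriting the same page
+	 * table without ever using the pte we installed for it last time,
+	 * the page is probably no longer a page table and is unshadowed
+	 * further down.  A used (accessed) pte indicates ordinary demand
+	 * paging, which must not trigger unshadowing.
+	 */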
- if (gfn == vcpu->last_pt_write_gfn) {
+ if (gfn == vcpu->last_pt_write_gfn
+ && !last_updated_pte_accessed(vcpu)) {
++vcpu->last_pt_write_count;
if (vcpu->last_pt_write_count >= 3)
flooded = 1;
} else {
vcpu->last_pt_write_gfn = gfn;
vcpu->last_pt_write_count = 1;
+ vcpu->last_pte_updated = NULL;
}
index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
bucket = &vcpu->kvm->mmu_page_hash[index];
	...

		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
}
- spte = PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
+ /*
+ * We don't set the accessed bit, since we sometimes want to see
+ * whether the guest actually used the pte (in order to detect
+ * demand paging).
+ */
+ spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
spte |= gpte & PT64_NX_MASK;
if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;
	...
	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
if (!was_rmapped)
rmap_add(vcpu, shadow_pte);
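+	/*
+	 * Remember the shadow pte we just set up so that kvm_mmu_pte_write()
+	 * can later check its accessed bit, unless this fault was itself a
+	 * write to a shadowed page table (ptwrite).
+	 */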
+ if (!ptwrite || !*ptwrite)
+ vcpu->last_pte_updated = shadow_pte;
}
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,