pt_element_t *ptep;
pt_element_t inherited_ar;
gfn_t gfn;
+ u32 error_code;
};
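
The new error_code field lets the walker hand back a ready-made x86 page-fault
error code, which the caller forwards verbatim via inject_page_fault(). For
reference, the architectural bits behind the PFERR_* masks used below (a
sketch; the exact defines live in the KVM headers):

	#define PFERR_PRESENT_MASK	(1U << 0)  /* page present: protection fault */
	#define PFERR_WRITE_MASK	(1U << 1)  /* access was a write */
	#define PFERR_USER_MASK		(1U << 2)  /* access came from CPL 3 */
	#define PFERR_FETCH_MASK	(1U << 4)  /* access was an instruction fetch */
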
/*
* Fetch a guest pte for a guest virtual address
*/
-static void FNAME(walk_addr)(struct guest_walker *walker,
- struct kvm_vcpu *vcpu, gva_t addr)
+static int FNAME(walk_addr)(struct guest_walker *walker,
+ struct kvm_vcpu *vcpu, gva_t addr,
+ int write_fault, int user_fault, int fetch_fault)
{
hpa_t hpa;
struct kvm_memory_slot *slot;
walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
root = *walker->ptep;
if (!(root & PT_PRESENT_MASK))
- return;
+ goto not_present;
--walker->level;
}
#endif
ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
((unsigned long)ptep & PAGE_MASK));
- if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
- *ptep |= PT_ACCESSED_MASK;
-
if (!is_present_pte(*ptep))
- break;
+ goto not_present;
+
+ if (write_fault && !is_writeble_pte(*ptep))
+ if (user_fault || is_write_protection(vcpu))
+ goto access_error;
+
+ if (user_fault && !(*ptep & PT_USER_MASK))
+ goto access_error;
+
+#if PTTYPE == 64
+ if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
+ goto access_error;
+#endif
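
Taken together, these checks reproduce the processor's permission rules: a
write to a read-only pte faults for user accesses always, and for supervisor
accesses only when CR0.WP is set (is_write_protection()); a user access to a
supervisor pte always faults; and an instruction fetch from an NX-marked pte
faults only when EFER.NX is enabled. A hypothetical standalone restatement
(everything except the PT_* masks is made up for illustration):

	/* Return nonzero if the access must fault on this guest pte. */
	static int access_would_fault(u64 pte, int write, int user, int fetch,
				      int cr0_wp, int efer_nx)
	{
		if (write && !(pte & PT_WRITABLE_MASK) && (user || cr0_wp))
			return 1;	/* write to a read-only page */
		if (user && !(pte & PT_USER_MASK))
			return 1;	/* user access to a supervisor page */
		if (fetch && efer_nx && (pte & PT64_NX_MASK))
			return 1;	/* fetch from a no-execute page */
		return 0;
	}
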
+
+ if (!(*ptep & PT_ACCESSED_MASK)) {
+ mark_page_dirty(vcpu->kvm, table_gfn);
+ *ptep |= PT_ACCESSED_MASK;
+ }
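
Setting the accessed bit writes guest memory behind the guest's back, so the
frame holding this page table is flagged in the dirty log first; otherwise a
live migration pass could miss the update. A rough sketch of the effect of
mark_page_dirty() (the real helper lives elsewhere in KVM; treat this as an
assumption about its behavior):

	void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
	{
		struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

		if (slot && slot->dirty_bitmap)	/* dirty logging enabled? */
			set_bit(gfn - slot->base_gfn, slot->dirty_bitmap);
	}
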
if (walker->level == PT_PAGE_TABLE_LEVEL) {
		walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
			>> PAGE_SHIFT;
		break;
}
- if (walker->level != 3 || is_long_mode(vcpu))
- walker->inherited_ar &= walker->table[index];
+ walker->inherited_ar &= walker->table[index];
table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
kunmap_atomic(walker->table, KM_USER0);
}
walker->ptep = ptep;
pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
+ return 1;
+
+not_present:
+ walker->error_code = 0;
+ goto err;
+
+access_error:
+ walker->error_code = PFERR_PRESENT_MASK;
+
+err:
+ if (write_fault)
+ walker->error_code |= PFERR_WRITE_MASK;
+ if (user_fault)
+ walker->error_code |= PFERR_USER_MASK;
+ if (fetch_fault)
+ walker->error_code |= PFERR_FETCH_MASK;
+ return 0;
}
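
Note the split between the two labels: not_present leaves PFERR_PRESENT_MASK
clear while access_error sets it, mirroring how hardware uses bit 0 to
distinguish not-present faults from protection faults; the access-type bits
are then OR-ed in on both paths. Sample outcomes, assuming the standard x86
encoding:

	/*
	 * not-present read, CPL 0:	error_code = 0x0
	 * not-present write, CPL 3:	error_code = 0x6  (write | user)
	 * user fetch, page present:	error_code = 0x15 (present | user | fetch)
	 */
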
static void FNAME(release_walker)(struct guest_walker *walker)
{
	kunmap_atomic(walker->table, KM_USER0);
}
+static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
+ struct guest_walker *walker)
+{
+ mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
+}
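
This helper relies on walk_addr() recording, at each level, the frame that
holds the guest page table being walked; table_gfn[] is assumed (the code is
not in this excerpt) to be filled in along the lines of:

	walker->table_gfn[walker->level - 1] = table_gfn;

so indexing it with walker->level - 1 after the walk yields the innermost
guest page table, the one whose pte the fault handler is about to update.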
+
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
u64 *shadow_pte, u64 access_bits, gfn_t gfn)
{
u64 shadow_pte;
int metaphysical;
gfn_t table_gfn;
+ unsigned hugepage_access = 0;
if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
if (level == PT_PAGE_TABLE_LEVEL)
if (level - 1 == PT_PAGE_TABLE_LEVEL
&& walker->level == PT_DIRECTORY_LEVEL) {
metaphysical = 1;
+ hugepage_access = *guest_ent;
+ hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
+ hugepage_access >>= PT_WRITABLE_SHIFT;
table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
>> PAGE_SHIFT;
} else {
table_gfn = walker->table_gfn[level - 2];
}
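
The mask-and-shift packs the pde's writable and user bits into a small key.
With the usual bit assignments (PT_WRITABLE_MASK = 1 << 1, PT_USER_MASK =
1 << 2, PT_WRITABLE_SHIFT = 1, assumed from the KVM headers), a worked
example:

	/*
	 * guest pde, low bits U W P = 0b111	(present, writable, user)
	 * & (PT_USER_MASK | PT_WRITABLE_MASK)	-> 0b110
	 * >> PT_WRITABLE_SHIFT			-> 0b11
	 *
	 * i.e. bit 0 = writable, bit 1 = user.
	 */

Keying kvm_mmu_get_page() with this value gives two pdes that map the same
huge frame with different access rights two distinct shadow page tables,
rather than letting the first mapping's rights leak into the second.
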
shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
- metaphysical, shadow_ent);
+ metaphysical, hugepage_access,
+ shadow_ent);
shadow_addr = shadow_page->page_hpa;
shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
} else if (kvm_mmu_lookup_page(vcpu, gfn)) {
pgprintk("%s: found shadow page for %lx, marking ro\n",
__FUNCTION__, gfn);
+ mark_page_dirty(vcpu->kvm, gfn);
+ FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
*guest_ent |= PT_DIRTY_MASK;
*write_pt = 1;
return 0;
}
mark_page_dirty(vcpu->kvm, gfn);
*shadow_ent |= PT_WRITABLE_MASK;
+ FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
*guest_ent |= PT_DIRTY_MASK;
rmap_add(vcpu, shadow_ent);
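
Both write-enable paths do the same bookkeeping: the data page and the guest
page table are marked dirty in the log, and PT_DIRTY_MASK is set in the guest
pte, because real hardware would have set the dirty bit on the write that
caused this fault.
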
u32 error_code)
{
int write_fault = error_code & PFERR_WRITE_MASK;
- int pte_present = error_code & PFERR_PRESENT_MASK;
int user_fault = error_code & PFERR_USER_MASK;
+ int fetch_fault = error_code & PFERR_FETCH_MASK;
struct guest_walker walker;
u64 *shadow_pte;
	int fixed;
+	int r;
/*
* Look up the shadow pte for the faulting address.
*/
- FNAME(walk_addr)(&walker, vcpu, addr);
- shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
+ r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
+ fetch_fault);
/*
* The page is not mapped by the guest. Let the guest handle it.
*/
- if (!shadow_pte) {
- pgprintk("%s: not mapped\n", __FUNCTION__);
- inject_page_fault(vcpu, addr, error_code);
+ if (!r) {
+ pgprintk("%s: guest page fault\n", __FUNCTION__);
+ inject_page_fault(vcpu, addr, walker.error_code);
FNAME(release_walker)(&walker);
return 0;
}
+ shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
shadow_pte, *shadow_pte);
/*
 * mmio: emulate if accessible, otherwise it's a guest fault.
*/
- if (is_io_pte(*shadow_pte)) {
- if (may_access(*shadow_pte, write_fault, user_fault))
- return 1;
- pgprintk("%s: io work, no access\n", __FUNCTION__);
- inject_page_fault(vcpu, addr,
- error_code | PFERR_PRESENT_MASK);
- kvm_mmu_audit(vcpu, "post page fault (io)");
- return 0;
- }
+ if (is_io_pte(*shadow_pte))
+ return 1;
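
The may_access() test can go away here because walk_addr() now performs the
equivalent checks against the guest ptes: by the time an io pte is reached
the access is known to be permitted, so the fault goes straight to the
emulator.
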
- /*
- * pte not present, guest page fault.
- */
- if (pte_present && !fixed && !write_pt) {
- inject_page_fault(vcpu, addr, error_code);
- kvm_mmu_audit(vcpu, "post page fault (guest)");
- return 0;
- }
-
- ++kvm_stat.pf_fixed;
+ ++vcpu->stat.pf_fixed;
kvm_mmu_audit(vcpu, "post page fault (fixed)");
return write_pt;
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
struct guest_walker walker;
- pt_element_t guest_pte;
- gpa_t gpa;
-
- FNAME(walk_addr)(&walker, vcpu, vaddr);
- guest_pte = *walker.ptep;
- FNAME(release_walker)(&walker);
-
- if (!is_present_pte(guest_pte))
- return UNMAPPED_GVA;
-
- if (walker.level == PT_DIRECTORY_LEVEL) {
- ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
- ASSERT(PTTYPE == 64 || is_pse(vcpu));
+ gpa_t gpa = UNMAPPED_GVA;
+ int r;
- gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
- (PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));
+ r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
- if (PTTYPE == 32 && is_cpuid_PSE36())
- gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
- (32 - PT32_DIR_PSE36_SHIFT);
- } else {
- gpa = (guest_pte & PT_BASE_ADDR_MASK);
- gpa |= (vaddr & ~PAGE_MASK);
+ if (r) {
+ gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
+ gpa |= vaddr & ~PAGE_MASK;
}
+ FNAME(release_walker)(&walker);
return gpa;
}
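
With the PSE/PSE36 address math gone (it is assumed to live in walk_addr()'s
large-page case, which this excerpt does not show), gva_to_gpa() reduces to
composing the walked gfn with the in-page offset. A hypothetical caller
sketch, e.g. on an emulator path (the constant name is an assumption):

	gpa_t gpa = FNAME(gva_to_gpa)(vcpu, vaddr);

	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;	/* let the guest handle it */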