/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
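
/*
 * For reference, the instantiation in mmu.c looks roughly like this
 * (a sketch of the include pattern, not a verbatim copy):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */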

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	/* shadow page tables are always 64-bit, so index them with PT64_INDEX */
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t *table;
	pt_element_t *ptep;
	pt_element_t inherited_ar;
};
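
/*
 * Typical calling pattern (a sketch; page_fault() and gva_to_gpa() below
 * do exactly this): walk the guest tables, inspect the final pte, then
 * release the kmap held by the walker.
 *
 *	struct guest_walker walker;
 *
 *	FNAME(walk_addr)(&walker, vcpu, addr);
 *	... examine *walker.ptep, walker.level, walker.inherited_ar ...
 *	FNAME(release_walker)(&walker);
 */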

/*
 * Fetch a guest pte for a guest virtual address
 */
static void FNAME(walk_addr)(struct guest_walker *walker,
			     struct kvm_vcpu *vcpu, gva_t addr)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;
	pt_element_t *ptep;
	pt_element_t root;
	gfn_t table_gfn;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	walker->table = NULL;
	root = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		/* PAE: the root is one of the four cached pdptrs */
		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
		root = *walker->ptep;
		if (!(root & PT_PRESENT_MASK))
			return;
		--walker->level;
	}
#endif
	table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	walker->table_gfn[walker->level - 1] = table_gfn;
	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
		 walker->level - 1, table_gfn);
	slot = gfn_to_memslot(vcpu->kvm, table_gfn);
	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ptep = &walker->table[index];
		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)ptep & PAGE_MASK));

		if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
			*ptep |= PT_ACCESSED_MASK;

		if (!is_present_pte(*ptep) ||
		    walker->level == PT_PAGE_TABLE_LEVEL ||
		    (walker->level == PT_DIRECTORY_LEVEL &&
		     (*ptep & PT_PAGE_SIZE_MASK) &&
		     (PTTYPE == 64 || is_pse(vcpu))))
			break;

		/* PAE pdptes (level 3 outside long mode) carry no access bits */
		if (walker->level != 3 || is_long_mode(vcpu))
			walker->inherited_ar &= walker->table[index];
		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
		paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
		kunmap_atomic(walker->table, KM_USER0);
		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
					    KM_USER0);
		--walker->level;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);
	}
	walker->ptep = ptep;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
}
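
/*
 * Index extraction sketch (assuming the usual x86 widths: 10 index bits
 * per level for 32-bit ptes, 9 for 64-bit/PAE ptes).  With 32-bit ptes
 * and addr == 0xdeadbeef:
 *
 *	PT_INDEX(addr, 2) == (0xdeadbeef >> 22) & 0x3ff == 0x37a   (pde)
 *	PT_INDEX(addr, 1) == (0xdeadbeef >> 12) & 0x3ff == 0x2db   (pte)
 */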

static void FNAME(release_walker)(struct guest_walker *walker)
{
	/* walk_addr() can bail out before mapping a table */
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
			   u64 *shadow_pte, u64 access_bits)
{
	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pte;
	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
		       guest_pte & PT_DIRTY_MASK, access_bits);
}

static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
			   u64 *shadow_pte, u64 access_bits,
			   int index)
{
	gpa_t gaddr;

	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pde;
	gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
	set_pte_common(vcpu, shadow_pte, gaddr,
		       guest_pde & PT_DIRTY_MASK, access_bits);
}
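
/*
 * PSE36 sketch (assuming the usual encoding, where pde bits 13-16 carry
 * physical address bits 32-35 of the 4MB page).  With bits 13-15 set,
 * i.e. (guest_pde & PT32_DIR_PSE36_MASK) == 0xe000:
 *
 *	0xe000 << (32 - PT32_DIR_PSE36_SHIFT) == 0xe000 << 19 == 0x700000000
 *
 * so bits 32-34 of gaddr become set.
 */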

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker)
{
	hpa_t shadow_addr;
	int level;
	u64 *prev_shadow_ent = NULL;
	pt_element_t *guest_ent = walker->ptep;

	if (!is_present_pte(*guest_ent))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;

		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				return shadow_ent;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL) {
			if (walker->level == PT_DIRECTORY_LEVEL) {
				if (prev_shadow_ent)
					*prev_shadow_ent |= PT_SHADOW_PS_MARK;
				FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar,
					       PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
			} else {
				ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
				FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar);
			}
			return shadow_ent;
		}

		/*
		 * Shadowing a guest 4MB page with 4k ptes: the shadow page
		 * table has no guest counterpart ("metaphysical").
		 */
		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, shadow_ent);
		if (!shadow_page)
			return ERR_PTR(-ENOMEM);
		shadow_addr = shadow_page->page_hpa;
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}
}
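
/*
 * Return value summary, as consumed by page_fault() below:
 *
 *	NULL             - guest pte not present; let the guest fault
 *	ERR_PTR(-ENOMEM) - out of shadow pages
 *	otherwise        - pointer to the leaf shadow pte for addr
 */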

/*
 * The guest faulted for write.  We need to
 *
 * - check write permissions
 * - update the guest pte dirty bit
 * - update our own dirty page tracking structures
 */
static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
			       u64 *shadow_ent,
			       struct guest_walker *walker,
			       gva_t addr,
			       int user,
			       int *write_pt)
{
	pt_element_t *guest_ent;
	int writable_shadow;
	gfn_t gfn;

	if (is_writeble_pte(*shadow_ent))
		return 0;

	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;

	if (user) {
		/*
		 * User mode access.  Fail if it's a kernel page or a
		 * read-only page.
		 */
		if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
			return 0;
		ASSERT(*shadow_ent & PT_USER_MASK);
	} else
		/*
		 * Kernel mode access.  Fail if it's a read-only page and
		 * supervisor write protection is enabled.
		 */
		if (!writable_shadow) {
			if (is_write_protection(vcpu))
				return 0;
			*shadow_ent &= ~PT_USER_MASK;
		}

	guest_ent = walker->ptep;

	if (!is_present_pte(*guest_ent)) {
		/* the guest pte went away; drop the shadow pte */
		*shadow_ent = 0;
		return 0;
	}

	gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	if (kvm_mmu_lookup_page(vcpu, gfn)) {
		pgprintk("%s: found shadow page for %lx, marking ro\n",
			 __FUNCTION__, gfn);
		*guest_ent |= PT_DIRTY_MASK;
		*write_pt = 1;
		return 0;
	}
	mark_page_dirty(vcpu->kvm, gfn);
	*shadow_ent |= PT_WRITABLE_MASK;
	*guest_ent |= PT_DIRTY_MASK;
	rmap_add(vcpu->kvm, shadow_ent);

	return 1;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise
 */
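
/*
 * error_code uses the x86 page fault error code layout: bit 0 present
 * (PFERR_PRESENT_MASK), bit 1 write (PFERR_WRITE_MASK), bit 2 user
 * (PFERR_USER_MASK).  For example, error_code == 7 is a user-mode write
 * that hit a present (here: write-protected) pte.
 */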
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int pte_present = error_code & PFERR_PRESENT_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int fixed;
	int write_pt = 0;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	FNAME(walk_addr)(&walker, vcpu, addr);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
	if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
		printk("%s: oom\n", __FUNCTION__);
		nonpaging_flush(vcpu);
		FNAME(release_walker)(&walker);
		return 1;
	}

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!shadow_pte) {
		pgprintk("%s: not mapped\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, error_code);
		FNAME(release_walker)(&walker);
		return 0;
	}

	pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
		 shadow_pte, *shadow_pte);

	/*
	 * Update the shadow pte.
	 */
	if (write_fault)
		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
					    user_fault, &write_pt);
	else
		fixed = fix_read_pf(shadow_pte);

	pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
		 shadow_pte, *shadow_pte);

	FNAME(release_walker)(&walker);

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte)) {
		if (may_access(*shadow_pte, write_fault, user_fault))
			return 1;
		pgprintk("%s: io work, no access\n", __FUNCTION__);
		inject_page_fault(vcpu, addr,
				  error_code | PFERR_PRESENT_MASK);
		return 0;
	}

	/*
	 * pte not present, guest page fault.
	 */
	if (pte_present && !fixed && !write_pt) {
		inject_page_fault(vcpu, addr, error_code);
		return 0;
	}

	++kvm_stat.pf_fixed;

	return 0;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	pt_element_t guest_pte;
	gpa_t gpa;

	FNAME(walk_addr)(&walker, vcpu, vaddr);
	guest_pte = *walker.ptep;
	FNAME(release_walker)(&walker);

	if (!is_present_pte(guest_pte))
		return UNMAPPED_GVA;

	if (walker.level == PT_DIRECTORY_LEVEL) {
		ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
		ASSERT(PTTYPE == 64 || is_pse(vcpu));

		gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
			(PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));

		if (PTTYPE == 32 && is_cpuid_PSE36())
			gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
				(32 - PT32_DIR_PSE36_SHIFT);
	} else {
		gpa = (guest_pte & PT_BASE_ADDR_MASK);
		gpa |= (vaddr & ~PAGE_MASK);
	}

	return gpa;
}
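
/*
 * Worked example for the 4MB-page path above, in the 32-bit instance
 * where PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK == 0x3fffff
 * (the low 22 bits of the virtual address):
 *
 *	guest_pte & PT_DIR_BASE_ADDR_MASK == 0x08000000
 *	vaddr == 0xdeadbeef
 *	gpa == 0x08000000 | (0xdeadbeef & 0x3fffff) == 0x082dbeef
 */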

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_MAX_FULL_LEVELS