/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
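
/*
 * mmu.c instantiates both variants by defining PTTYPE before including
 * this file, roughly as follows (a sketch of the inclusion pattern; see
 * mmu.c for the authoritative version):
 *
 *   #define PTTYPE 64
 *   #include "paging_tmpl.h"
 *   #undef PTTYPE
 *
 *   #define PTTYPE 32
 *   #include "paging_tmpl.h"
 *   #undef PTTYPE
 */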

#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
        #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
        #else
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
        #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
        #define PT_MAX_FULL_LEVELS 2
#else
        #error Invalid PTTYPE value
#endif
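
/*
 * FNAME(walk_addr), for example, expands to paging64_walk_addr or
 * paging32_walk_addr depending on which instantiation is being compiled.
 */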

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;                              /* current walk level */
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];    /* gfn of the table at each level */
        pt_element_t *table;                    /* currently mapped guest table */
        pt_element_t *ptep;                     /* pte that terminated the walk */
        pt_element_t inherited_ar;              /* access rights ANDed down the walk */
        gfn_t gfn;                              /* gfn the address translates to */
        u32 error_code;                         /* page fault error code on failure */
};

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gva_t addr,
                            int write_fault, int user_fault, int fetch_fault)
{
        hpa_t hpa;
        struct kvm_memory_slot *slot;
        pt_element_t *ptep;
        pt_element_t root;
        gfn_t table_gfn;

        pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
        walker->level = vcpu->mmu.root_level;
        walker->table = NULL;
        root = vcpu->cr3;
#if PTTYPE == 64
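        /*
         * In pae mode the top level is the four-entry pdpt cached in
         * vcpu->pdptrs, not the page named by cr3.
         */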
        if (!is_long_mode(vcpu)) {
                walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
                root = *walker->ptep;
                if (!(root & PT_PRESENT_MASK))
                        goto not_present;
                --walker->level;
        }
#endif
        table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
        walker->table_gfn[walker->level - 1] = table_gfn;
        pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
                 walker->level - 1, table_gfn);
        slot = gfn_to_memslot(vcpu->kvm, table_gfn);
        hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
        walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

        walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

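        /*
         * Descend the guest hierarchy one level per iteration, checking
         * presence and access rights and setting accessed bits much as
         * the hardware walker would.
         */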
        for (;;) {
                int index = PT_INDEX(addr, walker->level);
                hpa_t paddr;

                ptep = &walker->table[index];
                ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
                       ((unsigned long)ptep & PAGE_MASK));

                if (!is_present_pte(*ptep))
                        goto not_present;

                if (write_fault && !is_writeble_pte(*ptep))
                        if (user_fault || is_write_protection(vcpu))
                                goto access_error;

                if (user_fault && !(*ptep & PT_USER_MASK))
                        goto access_error;

#if PTTYPE == 64
                if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
                        goto access_error;
#endif

                if (!(*ptep & PT_ACCESSED_MASK)) {
                        mark_page_dirty(vcpu->kvm, table_gfn);
                        *ptep |= PT_ACCESSED_MASK;
                }

                if (walker->level == PT_PAGE_TABLE_LEVEL) {
                        walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                        break;
                }

                if (walker->level == PT_DIRECTORY_LEVEL
                    && (*ptep & PT_PAGE_SIZE_MASK)
                    && (PTTYPE == 64 || is_pse(vcpu))) {
                        walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                        walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
                        break;
                }

                walker->inherited_ar &= walker->table[index];
                table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
                paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
                kunmap_atomic(walker->table, KM_USER0);
                walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
                                            KM_USER0);
                --walker->level;
                walker->table_gfn[walker->level - 1] = table_gfn;
                pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
                         walker->level - 1, table_gfn);
        }
        walker->ptep = ptep;
        pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
        return 1;

not_present:
        walker->error_code = 0;
        goto err;

access_error:
        walker->error_code = PFERR_PRESENT_MASK;

err:
        if (write_fault)
                walker->error_code |= PFERR_WRITE_MASK;
        if (user_fault)
                walker->error_code |= PFERR_USER_MASK;
        if (fetch_fault)
                walker->error_code |= PFERR_FETCH_MASK;
        return 0;
}

static void FNAME(release_walker)(struct guest_walker *walker)
{
        if (walker->table)
                kunmap_atomic(walker->table, KM_USER0);
}

static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
                                        struct guest_walker *walker)
{
        mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
}

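/*
 * Build a shadow pte from a guest pte or pde: copy the relevant guest
 * bits into the shadow pte, set the guest dirty bit on a write fault,
 * tag mmio pages with the shadow io mark, and write-protect guest page
 * tables that are themselves shadowed so that writes to them trap.
 */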
static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
                                  u64 *shadow_pte,
                                  gpa_t gaddr,
                                  pt_element_t *gpte,
                                  u64 access_bits,
                                  int user_fault,
                                  int write_fault,
                                  int *ptwrite,
                                  struct guest_walker *walker,
                                  gfn_t gfn)
{
        hpa_t paddr;
        int dirty = *gpte & PT_DIRTY_MASK;
        int was_rmapped = is_rmap_pte(*shadow_pte);

        pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
                 " user_fault %d gfn %lx\n",
                 __FUNCTION__, *shadow_pte, (u64)*gpte, access_bits,
                 write_fault, user_fault, gfn);

        if (write_fault && !dirty) {
                *gpte |= PT_DIRTY_MASK;
                dirty = 1;
                FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
        }

        *shadow_pte |= *gpte & PT_PTE_COPY_MASK;
        *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
        if (!dirty)
                access_bits &= ~PT_WRITABLE_MASK;

        paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

        *shadow_pte |= PT_PRESENT_MASK;
        if (access_bits & PT_USER_MASK)
                *shadow_pte |= PT_USER_MASK;

        if (is_error_hpa(paddr)) {
                *shadow_pte |= gaddr;
                *shadow_pte |= PT_SHADOW_IO_MARK;
                *shadow_pte &= ~PT_PRESENT_MASK;
                return;
        }

        *shadow_pte |= paddr;

        if (!write_fault && (*shadow_pte & PT_SHADOW_USER_MASK) &&
            !(*shadow_pte & PT_USER_MASK)) {
                /*
                 * If supervisor write protect is disabled, we shadow kernel
                 * pages as user pages so we can trap the write access.
                 */
                *shadow_pte |= PT_USER_MASK;
                *shadow_pte &= ~PT_WRITABLE_MASK;
                access_bits &= ~PT_WRITABLE_MASK;
        }

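        /*
         * If the gfn being mapped is itself a shadowed guest page table,
         * map it read only so that writes to it trap and the shadow can
         * be kept in sync (or unshadow it on a user-mode fault).
         */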
        if ((access_bits & PT_WRITABLE_MASK)
            || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
                struct kvm_mmu_page *shadow;

                *shadow_pte |= PT_WRITABLE_MASK;
                if (user_fault) {
                        mmu_unshadow(vcpu, gfn);
                        goto unshadowed;
                }

                shadow = kvm_mmu_lookup_page(vcpu, gfn);
                if (shadow) {
                        pgprintk("%s: found shadow page for %lx, marking ro\n",
                                 __FUNCTION__, gfn);
                        access_bits &= ~PT_WRITABLE_MASK;
                        if (is_writeble_pte(*shadow_pte)) {
                                *shadow_pte &= ~PT_WRITABLE_MASK;
                                kvm_arch_ops->tlb_flush(vcpu);
                        }
                        if (write_fault)
                                *ptwrite = 1;
                }
        }

unshadowed:

        if (access_bits & PT_WRITABLE_MASK)
                mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

        page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
        if (!was_rmapped)
                rmap_add(vcpu, shadow_pte);
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte,
                           u64 *shadow_pte, u64 access_bits,
                           int user_fault, int write_fault, int *ptwrite,
                           struct guest_walker *walker, gfn_t gfn)
{
        access_bits &= *gpte;
        FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK,
                              gpte, access_bits, user_fault, write_fault,
                              ptwrite, walker, gfn);
}

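/*
 * Called from the emulated-write path when the guest writes to a page
 * that backs a shadowed page table, to keep the shadow pte in sync with
 * the guest pte.
 */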
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
                              u64 *spte, const void *pte, int bytes)
{
        pt_element_t gpte;

        if (bytes < sizeof(pt_element_t))
                return;
        gpte = *(const pt_element_t *)pte;
        if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
                return;
        pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
        FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
                       0, NULL, NULL,
                       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
}

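/*
 * Like set_pte, but for a guest pde that maps a large page.  With 32-bit
 * PSE36, pde bits 13-16 supply physical address bits 32-35, hence the
 * extra shift below.
 */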
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
                           u64 *shadow_pte, u64 access_bits,
                           int user_fault, int write_fault, int *ptwrite,
                           struct guest_walker *walker, gfn_t gfn)
{
        gpa_t gaddr;

        access_bits &= *gpde;
        gaddr = (gpa_t)gfn << PAGE_SHIFT;
        if (PTTYPE == 32 && is_cpuid_PSE36())
                gaddr |= (*gpde & PT32_DIR_PSE36_MASK) <<
                        (32 - PT32_DIR_PSE36_SHIFT);
        FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
                              gpde, access_bits, user_fault, write_fault,
                              ptwrite, walker, gfn);
}

/*
 * Fetch the shadow pte for the faulting guest virtual address, building
 * any missing intermediate levels of the shadow paging hierarchy on the
 * way down.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *walker,
                         int user_fault, int write_fault, int *ptwrite)
{
        hpa_t shadow_addr;
        int level;
        u64 *shadow_ent;
        u64 *prev_shadow_ent = NULL;
        pt_element_t *guest_ent = walker->ptep;

        if (!is_present_pte(*guest_ent))
                return NULL;

        shadow_addr = vcpu->mmu.root_hpa;
        level = vcpu->mmu.shadow_root_level;
        if (level == PT32E_ROOT_LEVEL) {
                shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
                shadow_addr &= PT64_BASE_ADDR_MASK;
                --level;
        }

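        /*
         * Walk the shadow hierarchy in step with the guest walk just
         * completed, allocating shadow pages for any missing levels.
         */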
        for (; ; level--) {
                u32 index = SHADOW_PT_INDEX(addr, level);
                struct kvm_mmu_page *shadow_page;
                u64 shadow_pte;
                int metaphysical;
                gfn_t table_gfn;
                unsigned hugepage_access = 0;

                shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
                        if (level == PT_PAGE_TABLE_LEVEL)
                                break;
                        shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
                        prev_shadow_ent = shadow_ent;
                        continue;
                }

                if (level == PT_PAGE_TABLE_LEVEL)
                        break;

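                /*
                 * A guest large page has no guest page table backing
                 * the last shadow level, so the shadow page built for
                 * it is "metaphysical": it maps the large frame itself
                 * rather than shadowing a guest page table.
                 */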
                if (level - 1 == PT_PAGE_TABLE_LEVEL
                    && walker->level == PT_DIRECTORY_LEVEL) {
                        metaphysical = 1;
                        hugepage_access = *guest_ent;
                        hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
                        hugepage_access >>= PT_WRITABLE_SHIFT;
                        table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                } else {
                        metaphysical = 0;
                        table_gfn = walker->table_gfn[level - 2];
                }
                shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                               metaphysical, hugepage_access,
                                               shadow_ent);
                shadow_addr = __pa(shadow_page->spt);
                shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
                        | PT_WRITABLE_MASK | PT_USER_MASK;
                *shadow_ent = shadow_pte;
                prev_shadow_ent = shadow_ent;
        }

        if (walker->level == PT_DIRECTORY_LEVEL) {
                if (prev_shadow_ent)
                        *prev_shadow_ent |= PT_SHADOW_PS_MARK;
                FNAME(set_pde)(vcpu, guest_ent, shadow_ent,
                               walker->inherited_ar, user_fault, write_fault,
                               ptwrite, walker, walker->gfn);
        } else {
                ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
                FNAME(set_pte)(vcpu, guest_ent, shadow_ent,
                               walker->inherited_ar, user_fault, write_fault,
                               ptwrite, walker, walker->gfn);
        }
        return shadow_ent;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte being marked not present,
 *     not writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                             u32 error_code)
{
        int write_fault = error_code & PFERR_WRITE_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        int fetch_fault = error_code & PFERR_FETCH_MASK;
        struct guest_walker walker;
        u64 *shadow_pte;
        int write_pt = 0;
        int r;

        pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
        kvm_mmu_audit(vcpu, "pre page fault");

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        /*
         * Look up the shadow pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
                             fetch_fault);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __FUNCTION__);
                inject_page_fault(vcpu, addr, walker.error_code);
                FNAME(release_walker)(&walker);
                vcpu->last_pt_write_count = 0; /* reset fork detector */
                return 0;
        }

        shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
                                  &write_pt);
        pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
                 shadow_pte, *shadow_pte, write_pt);

        FNAME(release_walker)(&walker);

        if (!write_pt)
                vcpu->last_pt_write_count = 0; /* reset fork detector */

        /*
         * mmio: emulate if accessible, otherwise it's a guest fault.
         */
        if (is_io_pte(*shadow_pte))
                return 1;

        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, "post page fault (fixed)");

        return write_pt;
}

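/*
 * Translate a guest virtual address to a guest physical address by a
 * software walk; returns UNMAPPED_GVA if the guest does not map it.
 */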
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

        if (r) {
                gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
                gpa |= vaddr & ~PAGE_MASK;
        }

        FNAME(release_walker)(&walker);
        return gpa;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_MAX_FULL_LEVELS