/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/module.h>

#define pgprintk(x...) do { printk(x); } while (0)
#define rmap_printk(x...) do { printk(x); } while (0)

#define ASSERT(x)                                                       \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",    \
                       __FILE__, __LINE__, #x);                         \
        }

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT32_PTE_COPY_MASK \
        (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)

#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52
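
/*
 * Bits 9-11 of a pte (and, in 64-bit ptes, bits 52-62) are ignored by the
 * hardware, so the shadow MMU uses them for its own bookkeeping: marking
 * large-page and mmio shadow entries, and remembering the guest's
 * writable/user permissions while the shadow pte itself is write-protected.
 */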

#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)

#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
#define PT_SHADOW_USER_MASK (1ULL << PT_SHADOW_USER_SHIFT)

#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
        (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
        (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level) \
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
        (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
        (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level) \
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
#define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
};

static int is_write_protection(struct kvm_vcpu *vcpu)
        return vcpu->cr0 & CR0_WP_MASK;

static int is_cpuid_PSE36(void)

static int is_present_pte(unsigned long pte)
        return pte & PT_PRESENT_MASK;

static int is_writeble_pte(unsigned long pte)
        return pte & PT_WRITABLE_MASK;

static int is_io_pte(unsigned long pte)
        return pte & PT_SHADOW_IO_MARK;

static int is_rmap_pte(u64 pte)
        return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
                == (PT_WRITABLE_MASK | PT_PRESENT_MASK);

/*
 * Reverse mapping data structures:
 *
 * If page->private bit zero is zero, then page->private points to the
 * shadow page table entry that points to page_address(page).
 *
 * If page->private bit zero is one, (then page->private & ~1) points
 * to a struct kvm_rmap_desc containing more mappings.
 */
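
/*
 * For example: after the first writable spte for a page is added,
 * page->private == (unsigned long)spte.  Once a second spte maps the same
 * page, a kvm_rmap_desc is allocated, both sptes are moved into
 * desc->shadow_ptes[], and page->private == (unsigned long)desc | 1, with
 * desc->more chaining further descriptors of RMAP_EXT entries each.
 */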

static void rmap_add(struct kvm *kvm, u64 *spte)
        struct kvm_rmap_desc *desc;

        if (!is_rmap_pte(*spte))
                return;
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
        if (!page->private) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                page->private = (unsigned long)spte;
        } else if (!(page->private & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = kzalloc(sizeof *desc, GFP_NOWAIT);
                if (!desc)
                        BUG(); /* FIXME: return error */
                desc->shadow_ptes[0] = (u64 *)page->private;
                desc->shadow_ptes[1] = spte;
                page->private = (unsigned long)desc | 1;
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
                if (desc->shadow_ptes[RMAP_EXT-1]) {
                        desc->more = kzalloc(sizeof *desc->more, GFP_NOWAIT);
                        if (!desc->more)
                                BUG(); /* FIXME: return error */
                for (i = 0; desc->shadow_ptes[i]; ++i)
                desc->shadow_ptes[i] = spte;
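
/*
 * rmap_desc_remove_entry(): drop one spte from a reverse-map descriptor,
 * compacting the array by moving the last entry into the vacated slot.  If
 * the descriptor is no longer needed it is unlinked from the chain, and a
 * lone remaining spte collapses back to the inline "bit zero clear"
 * encoding in page->private.
 */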

static void rmap_desc_remove_entry(struct page *page,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
        for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
        desc->shadow_ptes[i] = desc->shadow_ptes[j];
        desc->shadow_ptes[j] = 0;
        if (!prev_desc && !desc->more)
                page->private = (unsigned long)desc->shadow_ptes[0];
                prev_desc->more = desc->more;
                page->private = (unsigned long)desc->more | 1;

static void rmap_remove(struct kvm *kvm, u64 *spte)
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;

        if (!is_rmap_pte(*spte))
                return;
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
        if (!page->private) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
        } else if (!(page->private & 1)) {
                rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
                if ((u64 *)page->private != spte) {
                        printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
                               spte, *spte);
                rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
                for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                        if (desc->shadow_ptes[i] == spte) {
                                rmap_desc_remove_entry(page, desc, i,
                                                       prev_desc);
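
/*
 * rmap_write_protect(): walk every shadow pte that currently maps the page
 * at gfn via the reverse map, clear its writable bit and drop it from the
 * rmap, so that future guest writes to the page will fault.  Used when the
 * page starts being shadowed as a guest page table.
 */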

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
        struct kvm_memory_slot *slot;
        struct kvm_rmap_desc *desc;

        slot = gfn_to_memslot(kvm, gfn);
        page = gfn_to_page(slot, gfn);

        while (page->private) {
                if (!(page->private & 1))
                        spte = (u64 *)page->private;
                        desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
                        spte = desc->shadow_ptes[0];
                BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
                       page_to_pfn(page) << PAGE_SHIFT);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                BUG_ON(!(*spte & PT_WRITABLE_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                rmap_remove(kvm, spte);
                *spte &= ~(u64)PT_WRITABLE_MASK;

static int is_empty_shadow_page(hpa_t page_hpa)
        for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
                        printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
                               pos, *pos);

static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
        struct kvm_mmu_page *page_head = page_header(page_hpa);

        ASSERT(is_empty_shadow_page(page_hpa));
        list_del(&page_head->link);
        page_head->page_hpa = page_hpa;
        list_add(&page_head->link, &vcpu->free_pages);
        ++vcpu->kvm->n_free_mmu_pages;

static unsigned kvm_page_table_hashfn(gfn_t gfn)

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte)
        struct kvm_mmu_page *page;

        if (list_empty(&vcpu->free_pages))
                return NULL;

        page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
        list_del(&page->link);
        list_add(&page->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(page->page_hpa));
        page->slot_bitmap = 0;
        page->multimapped = 0;
        page->parent_pte = parent_pte;
        --vcpu->kvm->n_free_mmu_pages;
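
/*
 * A shadow page may be pointed to by more than one shadow pte (its
 * parents).  A page with a single parent keeps that pte pointer inline in
 * page->parent_pte; once it becomes multimapped, the parents are kept in a
 * chain of kvm_pte_chain blocks hanging off page->parent_ptes.
 */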

static void mmu_page_add_parent_pte(struct kvm_mmu_page *page, u64 *parent_pte)
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;

        if (!page->multimapped) {
                u64 *old = page->parent_pte;

                page->parent_pte = parent_pte;
                page->multimapped = 1;
                pte_chain = kzalloc(sizeof(struct kvm_pte_chain), GFP_NOWAIT);
                INIT_HLIST_HEAD(&page->parent_ptes);
                hlist_add_head(&pte_chain->link, &page->parent_ptes);
                pte_chain->parent_ptes[0] = old;
        hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
                        if (!pte_chain->parent_ptes[i]) {
                                pte_chain->parent_ptes[i] = parent_pte;
        pte_chain = kzalloc(sizeof(struct kvm_pte_chain), GFP_NOWAIT);
        hlist_add_head(&pte_chain->link, &page->parent_ptes);
        pte_chain->parent_ptes[0] = parent_pte;

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
                                       u64 *parent_pte)
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;

        if (!page->multimapped) {
                BUG_ON(page->parent_pte != parent_pte);
                page->parent_pte = NULL;
        hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                        if (pte_chain->parent_ptes[i] != parent_pte)
                        while (i + 1 < NR_PTE_CHAIN_ENTRIES
                               && pte_chain->parent_ptes[i + 1]) {
                                pte_chain->parent_ptes[i]
                                        = pte_chain->parent_ptes[i + 1];
                        pte_chain->parent_ptes[i] = NULL;
                        hlist_del(&pte_chain->link);
                        if (hlist_empty(&page->parent_ptes)) {
                                page->multimapped = 0;
                                page->parent_pte = NULL;

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
                                                gfn_t gfn)
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;

        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: found role %x\n",
                                 __FUNCTION__, page->role.word);
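
/*
 * kvm_mmu_get_page(): find a shadow page for this gfn and role in the hash
 * table, or allocate one if none exists.  The role encodes the guest paging
 * level, the metaphysical flag and, when a 32-bit guest table is shadowed
 * by 64-bit format pages (which hold fewer entries), which quadrant of the
 * guest table this shadow page covers.  Newly shadowed guest page tables
 * are write-protected via the rmap so that guest modifications trap.
 */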

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        union kvm_mmu_page_role role;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;

        role.glevels = vcpu->mmu.root_level;
        role.metaphysical = metaphysical;
        if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
                 gfn, role.word);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && page->role.word == role.word) {
                        mmu_page_add_parent_pte(page, parent_pte);
                        pgprintk("%s: found\n", __FUNCTION__);
        page = kvm_mmu_alloc_page(vcpu, parent_pte);
        pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
        hlist_add_head(&page->hash_link, bucket);
        rmap_write_protect(vcpu->kvm, gfn);

static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
                                         struct kvm_mmu_page *page)
        pt = __va(page->page_hpa);

        if (page->role.level == PT_PAGE_TABLE_LEVEL) {
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                        if (pt[i] & PT_PRESENT_MASK)
                                rmap_remove(vcpu->kvm, &pt[i]);

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                if (!(ent & PT_PRESENT_MASK))
                        continue;
                ent &= PT64_BASE_ADDR_MASK;
                mmu_page_remove_parent_pte(page_header(ent), &pt[i]);

static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
                             struct kvm_mmu_page *page,
                             u64 *parent_pte)
        mmu_page_remove_parent_pte(page, parent_pte);

static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
                             struct kvm_mmu_page *page)
        while (page->multimapped || page->parent_pte) {
                if (!page->multimapped)
                        parent_pte = page->parent_pte;
                struct kvm_pte_chain *chain;

                chain = container_of(page->parent_ptes.first,
                                     struct kvm_pte_chain, link);
                parent_pte = chain->parent_ptes[0];
                kvm_mmu_put_page(vcpu, page, parent_pte);
        kvm_mmu_page_unlink_children(vcpu, page);
        hlist_del(&page->hash_link);
        kvm_mmu_free_page(vcpu, page->page_hpa);

static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node, *n;

        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
                                 page->role.word);
                        kvm_mmu_zap_page(vcpu, page);

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
        int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
        struct kvm_mmu_page *page_head = page_header(__pa(pte));

        __set_bit(slot, &page_head->slot_bitmap);

hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
        hpa_t hpa = gpa_to_hpa(vcpu, gpa);

        return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
        struct kvm_memory_slot *slot;

        ASSERT((gpa & HPA_ERR_MASK) == 0);
        slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
        if (!slot)
                return gpa | HPA_ERR_MASK;
        page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
        return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
                | (gpa & (PAGE_SIZE-1));

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        if (gpa == UNMAPPED_GVA)
                return UNMAPPED_GVA;
        return gpa_to_hpa(vcpu, gpa);

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
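
/*
 * nonpaging_map(): with guest paging disabled, guest virtual addresses equal
 * guest physical addresses.  Walk the shadow table from the root, allocating
 * intermediate shadow pages as needed, and install a writable mapping for
 * the faulting address at the lowest level, adding it to the rmap.
 */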

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
        int level = PT32E_ROOT_LEVEL;
        hpa_t table_addr = vcpu->mmu.root_hpa;
        u32 index = PT64_INDEX(v, level);

        ASSERT(VALID_PAGE(table_addr));
        table = __va(table_addr);

        if (is_present_pte(pte) && is_writeble_pte(pte))
                return 0;
        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
        page_header_update_slot(vcpu->kvm, table, v);
        table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
        rmap_add(vcpu->kvm, &table[index]);

        if (table[index] == 0) {
                struct kvm_mmu_page *new_table;

                pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
                new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                        pgprintk("nonpaging_map: ENOMEM\n");

                table[index] = new_table->page_hpa | PT_PRESENT_MASK
                        | PT_WRITABLE_MASK | PT_USER_MASK;
        table_addr = table[index] & PT64_BASE_ADDR_MASK;
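
/*
 * Shadow root management: with a 4-level shadow (PT64_ROOT_LEVEL) there is a
 * single root page tracked in mmu.root_hpa; otherwise four PAE-style root
 * entries live in mmu.pae_root[], and root_hpa points at that array.
 */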

static void mmu_free_roots(struct kvm_vcpu *vcpu)
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;

                ASSERT(VALID_PAGE(root));
                vcpu->mmu.root_hpa = INVALID_PAGE;
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];

                ASSERT(VALID_PAGE(root));
                root &= PT64_BASE_ADDR_MASK;
                vcpu->mmu.pae_root[i] = INVALID_PAGE;
        vcpu->mmu.root_hpa = INVALID_PAGE;

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
        root_gfn = vcpu->cr3 >> PAGE_SHIFT;

        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;

                ASSERT(!VALID_PAGE(root));
                root = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                        PT64_ROOT_LEVEL, 0, NULL)->page_hpa;
                vcpu->mmu.root_hpa = root;
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];

                ASSERT(!VALID_PAGE(root));
                if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL)
                        root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
                else if (vcpu->mmu.root_level == 0)
                root = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                        PT32_ROOT_LEVEL, !is_paging(vcpu),
                vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
        vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
        ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

        paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);

        if (is_error_hpa(paddr))
                return 1;

        return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);

static void nonpaging_free(struct kvm_vcpu *vcpu)
        mmu_free_roots(vcpu);

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
        struct kvm_mmu *context = &vcpu->mmu;

        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->free = nonpaging_free;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa);

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
        ++kvm_stat.tlb_flush;
        kvm_arch_ops->tlb_flush(vcpu);

static void paging_new_cr3(struct kvm_vcpu *vcpu)
        pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
        mmu_free_roots(vcpu);
        mmu_alloc_roots(vcpu);
        kvm_mmu_flush_tlb(vcpu);
        kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);

static void mark_pagetable_nonglobal(void *shadow_pte)
        page_header(__pa(shadow_pte))->global = 0;
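
/*
 * set_pte_common(): fill in a shadow pte.  The guest's access bits are
 * stashed in the shadow-available bits, unbacked (mmio) guest addresses are
 * tagged with PT_SHADOW_IO_MARK and left non-present, writes to pages that
 * are themselves shadowed page tables are forced read-only, and the new
 * spte is entered into the reverse map.
 */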

static inline void set_pte_common(struct kvm_vcpu *vcpu,
        *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
        access_bits &= ~PT_WRITABLE_MASK;

        paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

        *shadow_pte |= access_bits;
        if (!(*shadow_pte & PT_GLOBAL_MASK))
                mark_pagetable_nonglobal(shadow_pte);
        if (is_error_hpa(paddr)) {
                *shadow_pte |= gaddr;
                *shadow_pte |= PT_SHADOW_IO_MARK;
                *shadow_pte &= ~PT_PRESENT_MASK;
                return;
        }
        *shadow_pte |= paddr;
        if (access_bits & PT_WRITABLE_MASK) {
                struct kvm_mmu_page *shadow;

                shadow = kvm_mmu_lookup_page(vcpu, gfn);
                        pgprintk("%s: found shadow page for %lx, marking ro\n",
                                 __FUNCTION__, gfn);
                        access_bits &= ~PT_WRITABLE_MASK;
                        *shadow_pte &= ~PT_WRITABLE_MASK;

        if (access_bits & PT_WRITABLE_MASK)
                mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

        page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
        rmap_add(vcpu->kvm, shadow_pte);

static void inject_page_fault(struct kvm_vcpu *vcpu,
        kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);

static inline int fix_read_pf(u64 *shadow_ent)
        if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
            !(*shadow_ent & PT_USER_MASK)) {
                /*
                 * If supervisor write protect is disabled, we shadow kernel
                 * pages as user pages so we can trap the write access.
                 */
                *shadow_ent |= PT_USER_MASK;
                *shadow_ent &= ~PT_WRITABLE_MASK;

static int may_access(u64 pte, int write, int user)
        if (user && !(pte & PT_USER_MASK))
                return 0;
        if (write && !(pte & PT_WRITABLE_MASK))
                return 0;
        return 1;

static void paging_free(struct kvm_vcpu *vcpu)
        nonpaging_free(vcpu);
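
/*
 * paging_tmpl.h is included twice below to instantiate the guest page table
 * walker for both guest formats: once producing the paging64_* functions
 * (PAE and long mode guests) and once producing the paging32_* functions
 * used for plain 32-bit guests.
 */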

#include "paging_tmpl.h"

#include "paging_tmpl.h"

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
        struct kvm_mmu *context = &vcpu->mmu;

        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
        mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
                              (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));

static int paging64_init_context(struct kvm_vcpu *vcpu)
        return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);

static int paging32_init_context(struct kvm_vcpu *vcpu)
        struct kvm_mmu *context = &vcpu->mmu;

        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->free = paging_free;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
                              (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));

static int paging32E_init_context(struct kvm_vcpu *vcpu)
        return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
        else if (is_long_mode(vcpu))
                return paging64_init_context(vcpu);
        else if (is_pae(vcpu))
                return paging32E_init_context(vcpu);
        else
                return paging32_init_context(vcpu);

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
        if (VALID_PAGE(vcpu->mmu.root_hpa)) {
                vcpu->mmu.free(vcpu);
                vcpu->mmu.root_hpa = INVALID_PAGE;

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
        destroy_kvm_mmu(vcpu);
        return init_kvm_mmu(vcpu);
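
/*
 * kvm_mmu_pre_write()/kvm_mmu_post_write() bracket an emulated guest write.
 * If the written gfn is shadowed as a guest page table, the affected shadow
 * ptes are unlinked here; pages that see misaligned or flooded writes are
 * assumed not to be page tables any more and are zapped wholesale.
 */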

void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *page;
        struct kvm_mmu_page *child;
        struct hlist_node *node, *n;
        struct hlist_head *bucket;
        unsigned offset = offset_in_page(gpa);
        unsigned page_offset;

        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
        if (gfn == vcpu->last_pt_write_gfn) {
                ++vcpu->last_pt_write_count;
                if (vcpu->last_pt_write_count >= 3)
                        flooded = 1;
        vcpu->last_pt_write_gfn = gfn;
        vcpu->last_pt_write_count = 1;
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
                if (page->gfn != gfn || page->role.metaphysical)
                        continue;
                pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
                if (misaligned || flooded) {
                        /*
                         * Misaligned accesses are too much trouble to fix
                         * up; also, they usually indicate a page is not used
                         * as a page table.
                         *
                         * If we're seeing too many writes to a page,
                         * it may no longer be a page table, or we may be
                         * forking, in which case it is better to unmap the
                         * page.
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, page->role.word);
                        kvm_mmu_zap_page(vcpu, page);
                page_offset = offset;
                level = page->role.level;
                if (page->role.glevels == PT32_ROOT_LEVEL) {
                        page_offset <<= 1;      /* 32->64 */
                        page_offset &= ~PAGE_MASK;
                spte = __va(page->page_hpa);
                spte += page_offset / sizeof(*spte);
                if (is_present_pte(pte)) {
                        if (level == PT_PAGE_TABLE_LEVEL)
                                rmap_remove(vcpu->kvm, spte);
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        mmu_page_remove_parent_pte(child, spte);

void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
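
/*
 * kvm_mmu_free_some_pages(): when the per-VM pool of free shadow pages runs
 * low, recycle pages from the tail of the active list (the least recently
 * allocated) until at least KVM_REFILL_PAGES are free again.
 */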

void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
        while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
                struct kvm_mmu_page *page;

                page = container_of(vcpu->kvm->active_mmu_pages.prev,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu, page);
EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
        while (!list_empty(&vcpu->free_pages)) {
                struct kvm_mmu_page *page;

                page = list_entry(vcpu->free_pages.next,
                                  struct kvm_mmu_page, link);
                list_del(&page->link);
                __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
                page->page_hpa = INVALID_PAGE;
        free_page((unsigned long)vcpu->mmu.pae_root);

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
        for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
                struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];

                INIT_LIST_HEAD(&page_header->link);
                if ((page = alloc_page(GFP_KERNEL)) == NULL)
                page->private = (unsigned long)page_header;
                page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
                memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
                list_add(&page_header->link, &vcpu->free_pages);
                ++vcpu->kvm->n_free_mmu_pages;

        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
         */
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        vcpu->mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
                vcpu->mmu.pae_root[i] = INVALID_PAGE;

        free_mmu_pages(vcpu);

int kvm_mmu_create(struct kvm_vcpu *vcpu)
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
        ASSERT(list_empty(&vcpu->free_pages));

        return alloc_mmu_pages(vcpu);

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
        ASSERT(!list_empty(&vcpu->free_pages));

        return init_kvm_mmu(vcpu);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
        destroy_kvm_mmu(vcpu);
        free_mmu_pages(vcpu);
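
/*
 * kvm_mmu_slot_remove_write_access(): clear the writable bit from every
 * shadow pte that maps memory in the given slot (dropping it from the rmap
 * as well), so that subsequent guest writes fault and can be tracked, e.g.
 * for dirty page logging.
 */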

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
        struct kvm_mmu_page *page;

        list_for_each_entry(page, &kvm->active_mmu_pages, link) {
                if (!test_bit(slot, &page->slot_bitmap))
                        continue;

                pt = __va(page->page_hpa);
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        if (pt[i] & PT_WRITABLE_MASK) {
                                rmap_remove(kvm, &pt[i]);
                                pt[i] &= ~PT_WRITABLE_MASK;