err.no Git - linux-2.6/blobdiff - drivers/kvm/mmu.c
KVM: Use list_move()
[linux-2.6] / drivers / kvm / mmu.c
index e85b4c7c36f7139bc20a7bcb3138ac4edb654acd..d81b9cd3465fcf80b9bf290d3aef907ea55098ac 100644 (file)
@@ -437,9 +437,8 @@ static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
        struct kvm_mmu_page *page_head = page_header(page_hpa);
 
        ASSERT(is_empty_shadow_page(page_hpa));
-       list_del(&page_head->link);
        page_head->page_hpa = page_hpa;
-       list_add(&page_head->link, &vcpu->free_pages);
+       list_move(&page_head->link, &vcpu->free_pages);
        ++vcpu->kvm->n_free_mmu_pages;
 }
 
@@ -457,11 +456,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                return NULL;
 
        page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
-       list_del(&page->link);
-       list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+       list_move(&page->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(page->page_hpa));
        page->slot_bitmap = 0;
-       page->global = 1;
        page->multimapped = 0;
        page->parent_pte = parent_pte;
        --vcpu->kvm->n_free_mmu_pages;
@@ -569,6 +566,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
+                                            unsigned hugepage_access,
                                             u64 *parent_pte)
 {
        union kvm_mmu_page_role role;
@@ -582,6 +580,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        role.glevels = vcpu->mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
+       role.hugepage_access = hugepage_access;
        if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
@@ -669,10 +668,8 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
        if (!page->root_count) {
                hlist_del(&page->hash_link);
                kvm_mmu_free_page(vcpu, page->page_hpa);
-       } else {
-               list_del(&page->link);
-               list_add(&page->link, &vcpu->kvm->active_mmu_pages);
-       }
+       } else
+               list_move(&page->link, &vcpu->kvm->active_mmu_pages);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -735,6 +732,15 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
        return gpa_to_hpa(vcpu, gpa);
 }
 
+struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
+{
+       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+
+       if (gpa == UNMAPPED_GVA)
+               return NULL;
+       return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
+}
+
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
@@ -772,7 +778,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
-                                                    1, &table[index]);
+                                                    1, 0, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                return -ENOMEM;
@@ -827,7 +833,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
                ASSERT(!VALID_PAGE(root));
                page = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                       PT64_ROOT_LEVEL, 0, NULL);
+                                       PT64_ROOT_LEVEL, 0, 0, NULL);
                root = page->page_hpa;
                ++page->root_count;
                vcpu->mmu.root_hpa = root;
@@ -844,7 +850,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        root_gfn = 0;
                page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                        PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                       NULL);
+                                       0, NULL);
                root = page->page_hpa;
                ++page->root_count;
                vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
@@ -918,11 +924,6 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
        kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
 }
 
-static void mark_pagetable_nonglobal(void *shadow_pte)
-{
-       page_header(__pa(shadow_pte))->global = 0;
-}
-
 static inline void set_pte_common(struct kvm_vcpu *vcpu,
                             u64 *shadow_pte,
                             gpa_t gaddr,
@@ -940,9 +941,6 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
 
        *shadow_pte |= access_bits;
 
-       if (!(*shadow_pte & PT_GLOBAL_MASK))
-               mark_pagetable_nonglobal(shadow_pte);
-
        if (is_error_hpa(paddr)) {
                *shadow_pte |= gaddr;
                *shadow_pte |= PT_SHADOW_IO_MARK;
@@ -1171,6 +1169,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
                         * and zap two pdes instead of one.
                         */
                        if (level == PT32_ROOT_LEVEL) {
+                               page_offset &= ~7; /* kill rounding error */
                                page_offset <<= 1;
                                npte = 2;
                        }
@@ -1359,7 +1358,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 
 static void audit_mappings(struct kvm_vcpu *vcpu)
 {
-       int i;
+       unsigned i;
 
        if (vcpu->mmu.root_level == 4)
                audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);