struct kvm_mmu_page *page_head = page_header(page_hpa);
ASSERT(is_empty_shadow_page(page_hpa));
- list_del(&page_head->link);
page_head->page_hpa = page_hpa;
- list_add(&page_head->link, &vcpu->free_pages);
+ list_move(&page_head->link, &vcpu->free_pages);
++vcpu->kvm->n_free_mmu_pages;
}
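This is the first of three list_del()/list_add() pairs that collapse into a single list_move(); the same conversion repeats in the allocation and zap paths below. Paraphrasing <linux/list.h>, list_move() is exactly the fused pair, so each conversion is behavior-preserving; hoisting the page_hpa assignment above the combined call is likewise safe, since it never touches page_head->link:

	/* paraphrased from <linux/list.h> */
	static inline void list_move(struct list_head *list, struct list_head *head)
	{
		__list_del(list->prev, list->next);	/* unlink in place */
		list_add(list, head);			/* relink at the head of @head */
	}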
return NULL;
page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
- list_del(&page->link);
- list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+ list_move(&page->link, &vcpu->kvm->active_mmu_pages);
ASSERT(is_empty_shadow_page(page->page_hpa));
page->slot_bitmap = 0;
- page->global = 1;
page->multimapped = 0;
page->parent_pte = parent_pte;
--vcpu->kvm->n_free_mmu_pages;
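The allocation path is the mirror image: a page moves from free_pages to the head of active_mmu_pages and its per-page state is reset. Note that page->global dies here together with its only other writer, mark_pagetable_nonglobal(), removed at the end of this excerpt. For orientation, the fields being reset live in kvm_mmu_page, roughly as follows (paraphrased from the era's drivers/kvm/kvm.h; treat member order and the omissions as approximate):

	struct kvm_mmu_page {
		struct list_head link;		/* on free_pages or active_mmu_pages */
		struct hlist_node hash_link;	/* shadow page hash chain */
		hpa_t page_hpa;			/* host physical address of the page */
		unsigned long slot_bitmap;	/* memslots this page maps into */
		int multimapped;		/* more than one parent pte? */
		int root_count;			/* pins while used as a shadow root */
		u64 *parent_pte;		/* valid while !multimapped */
		/* plus gfn and the kvm_mmu_page_role discussed below */
	};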
gva_t gaddr,
unsigned level,
int metaphysical,
+ unsigned hugepage_access,
u64 *parent_pte)
{
union kvm_mmu_page_role role;
role.glevels = vcpu->mmu.root_level;
role.level = level;
role.metaphysical = metaphysical;
+ role.hugepage_access = hugepage_access;
if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
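The role is a bitfield packed into one word and compared, together with the gfn, when looking up shadow pages; judging by the paging-side hunk of this patch, the new hugepage_access member records the user/write permission bits of the guest's huge pde, so the same guest page mapped with different permissions now yields distinct shadow pages instead of aliasing one. The quadrant arithmetic compensates for shadow tables always being 64-bit format: a 32-bit guest table holds 1024 entries (PT32_PT_BITS) but a shadow page holds only 512 (PT64_PT_BITS), so one guest table needs two (level 1) or four (level 2) shadow pages, told apart by the quadrant field of the role. A freestanding re-derivation, with the constants inlined as assumptions rather than taken from the kvm headers:

	/* illustrative sketch, not code from the patch */
	#include <stdio.h>

	#define PAGE_SHIFT   12
	#define PT64_PT_BITS 9	/* shadow tables: 512 entries */
	#define PT32_PT_BITS 10	/* 32-bit guest tables: 1024 entries */

	static unsigned quadrant(unsigned long gaddr, unsigned level)
	{
		unsigned q = gaddr >> (PAGE_SHIFT + PT64_PT_BITS * level);

		return q & ((1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1);
	}

	int main(void)
	{
		/* a guest level-1 table maps 4MB, a shadow page only 2MB,
		 * so bit 21 picks one of two quadrants: prints "0 1" */
		printf("%u %u\n", quadrant(0x0ul, 1), quadrant(0x200000ul, 1));
		/* the guest root maps 4GB vs. 1GB per shadow page, so bits
		 * 31:30 pick one of four: prints "0 3" */
		printf("%u %u\n", quadrant(0x0ul, 2), quadrant(0xc0000000ul, 2));
		return 0;
	}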
if (!page->root_count) {
hlist_del(&page->hash_link);
kvm_mmu_free_page(vcpu, page->page_hpa);
- } else {
- list_del(&page->link);
- list_add(&page->link, &vcpu->kvm->active_mmu_pages);
- }
+ } else
+ list_move(&page->link, &vcpu->kvm->active_mmu_pages);
}
static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
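The else branch above is the root pin at work: root_count counts uses of the page as a shadow root, so a pinned page must survive a zap and is merely moved within the active list instead of being freed. The pin itself is visible in the root-allocation hunks below; its release lives in mmu_free_roots, which this excerpt omits, roughly:

	++page->root_count;			/* mmu_alloc_roots: page backs a root */
	--page_header(root)->root_count;	/* mmu_free_roots: root torn down */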
>> PAGE_SHIFT;
new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
v, level - 1,
- 1, &table[index]);
+ 1, 0, &table[index]);
if (!new_table) {
pgprintk("nonpaging_map: ENOMEM\n");
return -ENOMEM;
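nonpaging_map() instantiates metaphysical pages with no guest pte behind them, so there are no guest permissions to record and it passes 0 for the new hugepage_access argument, as do the root allocations below. The paging path is where the value is real; approximately, paraphrasing this patch's paging_tmpl.h hunk, which the excerpt omits (treat the exact shifts as unverified):

	hugepage_access = *guest_ent;
	hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
	hugepage_access >>= PT_WRITABLE_SHIFT;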
ASSERT(!VALID_PAGE(root));
page = kvm_mmu_get_page(vcpu, root_gfn, 0,
- PT64_ROOT_LEVEL, 0, NULL);
+ PT64_ROOT_LEVEL, 0, 0, NULL);
root = page->page_hpa;
++page->root_count;
vcpu->mmu.root_hpa = root;
root_gfn = 0;
page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, !is_paging(vcpu),
- NULL);
+ 0, NULL);
root = page->page_hpa;
++page->root_count;
vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
}
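For a non-PAE guest all four PAE roots are derived from the same guest root (one root_gfn), and the i << 30 seed is what keeps their roles distinct: each root covers one gigabyte, which at level 2 is exactly one quadrant, so quadrant(i << 30, 2) == i in the sketch above. With paging disabled there is no guest table at all, hence the !is_paging(vcpu) metaphysical flag and, again, a hugepage_access of 0. Appending to the earlier sketch (illustrative, needs <assert.h>):

	/* each PAE root is one quadrant of the non-PAE guest root */
	for (int i = 0; i < 4; ++i)
		assert(quadrant((unsigned long)i << 30, 2) == i);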
-static void mark_pagetable_nonglobal(void *shadow_pte)
-{
- page_header(__pa(shadow_pte))->global = 0;
-}
-
static inline void set_pte_common(struct kvm_vcpu *vcpu,
u64 *shadow_pte,
gpa_t gaddr,
*shadow_pte |= access_bits;
- if (!(*shadow_pte & PT_GLOBAL_MASK))
- mark_pagetable_nonglobal(shadow_pte);
-
if (is_error_hpa(paddr)) {
*shadow_pte |= gaddr;
*shadow_pte |= PT_SHADOW_IO_MARK;
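An error hpa means the gfn has no backing memory, typically mmio, so instead of a translation the shadow pte remembers the guest physical address plus an IO mark, letting the fault path recover the gpa and emulate the access. A freestanding sketch of the encoding; the mask values are assumptions for illustration rather than the kvm headers' definitions, and the final non-present step is inferred from the requirement that an mmio pte fault on every access:

	#include <assert.h>
	#include <stdint.h>

	#define PT_PRESENT_MASK    (1ull << 0)
	#define PT_SHADOW_IO_MARK  (1ull << 9)	/* assumed: a software-available bit */
	#define PT_BASE_ADDR_MASK  ((~0ull << 12) & ((1ull << 52) - 1))

	int main(void)
	{
		uint64_t shadow_pte = PT_PRESENT_MASK;	/* access bits already set */
		uint64_t gaddr = 0xfee00000ull;		/* page-aligned mmio gpa */

		shadow_pte |= gaddr;			/* remember the gpa */
		shadow_pte |= PT_SHADOW_IO_MARK;	/* tag the pte as IO */
		shadow_pte &= ~PT_PRESENT_MASK;		/* force a fault on access */

		/* decode on the resulting fault */
		assert(shadow_pte & PT_SHADOW_IO_MARK);
		assert((shadow_pte & PT_BASE_ADDR_MASK) == gaddr);
		return 0;
	}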