1 #ifndef __KVM_X86_MMU_H
2 #define __KVM_X86_MMU_H
4 #include <linux/kvm_host.h>
/*
 * Page-table geometry: number of entries in one page-table page.
 * PT64_PT_BITS is defined elsewhere (not visible in this chunk).
 */
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

/*
 * Architectural x86 page-table entry bits (Intel SDM vol. 3, paging
 * chapter). These match the hardware PTE/PDE layout, not KVM-private
 * software bits.
 */
#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)	/* page-level write-through */
#define PT_PCD_MASK (1ULL << 4)	/* page-level cache-disable */
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
/* Bit 7 is PS (page size) in directory entries but PAT in 4K PTEs. */
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12	/* PAT bit position in large-page directory entries */
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

/* PSE-36: bits 13..16 of a large-page PDE hold physical address bits 32..35. */
#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

/* Shadow page-table depth for each guest paging mode. */
#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
39 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
41 if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
42 __kvm_mmu_free_some_pages(vcpu);
45 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
47 if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
50 return kvm_mmu_load(vcpu);
53 static inline int is_long_mode(struct kvm_vcpu *vcpu)
56 return vcpu->arch.shadow_efer & EFER_LME;
62 static inline int is_pae(struct kvm_vcpu *vcpu)
64 return vcpu->arch.cr4 & X86_CR4_PAE;
67 static inline int is_pse(struct kvm_vcpu *vcpu)
69 return vcpu->arch.cr4 & X86_CR4_PSE;
72 static inline int is_paging(struct kvm_vcpu *vcpu)
74 return vcpu->arch.cr0 & X86_CR0_PG;