#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
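With this change the slot space splits in two: indices 0 .. KVM_MEMORY_SLOTS - 1 remain visible to userspace, while the trailing KVM_PRIVATE_MEM_SLOTS indices are reserved for in-kernel users. A minimal sketch of the resulting layout; the helper name is illustrative, not part of the patch:

/* Sketch: how slot indices divide after this patch.
 * Userspace slots:   0 .. KVM_MEMORY_SLOTS - 1            (0..7 here)
 * Kernel-only slots: KVM_MEMORY_SLOTS ..
 *                    KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS - 1 (8..11) */
static inline int slot_is_private(int slot)
{
	return slot >= KVM_MEMORY_SLOTS;
}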
int naliases;
struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
int nmemslots;
- struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
+ struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
+ KVM_PRIVATE_MEM_SLOTS];
/*
* Hash table of struct kvm_mmu_page.
*/
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
enum emulation_result {
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
- if (mem->slot >= KVM_MEMORY_SLOTS)
+ if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
kvm_userspace_memory_region *mem,
int user_alloc)
{
+ if (mem->slot >= KVM_MEMORY_SLOTS)
+ return -EINVAL;
return kvm_set_memory_region(kvm, mem, user_alloc);
}
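The split gives two entry points: the ioctl wrapper above rejects any slot id in the private range, while kvm_set_memory_region() itself accepts the full range, so kernel code can install a private slot by calling it directly. A hedged sketch of such an in-kernel caller; the slot id, guest physical address, and size are made up for illustration:

/* Sketch: in-kernel registration of a private slot, assuming this
 * patch's kvm_set_memory_region() signature. All values illustrative. */
static int kvm_alloc_private_slot(struct kvm *kvm)
{
	struct kvm_userspace_memory_region mem = {
		.slot            = KVM_MEMORY_SLOTS,	/* first private slot */
		.guest_phys_addr = 0xfee00000,		/* illustrative gpa */
		.memory_size     = PAGE_SIZE,
	};

	/* Bypasses the userspace-facing wrapper, so the
	 * slot >= KVM_MEMORY_SLOTS check above does not apply. */
	return kvm_set_memory_region(kvm, &mem, 0);
}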
return __gfn_to_memslot(kvm, gfn);
}
+int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ int i;
+
+ gfn = unalias_gfn(kvm, gfn);
+ for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
+ struct kvm_memory_slot *memslot = &kvm->memslots[i];
+
+ if (gfn >= memslot->base_gfn
+ && gfn < memslot->base_gfn + memslot->npages)
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
+
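Note that kvm_is_visible_gfn() deliberately walks only the first KVM_MEMORY_SLOTS entries, so a gfn that lives in a private slot reports as invisible even though gfn_to_memslot() would still resolve it. A usage sketch; the surrounding caller and error value are hypothetical:

/* Sketch: refusing to hand a kernel-only page to userspace. */
if (!kvm_is_visible_gfn(kvm, gfn))
	return -EFAULT;	/* gfn resolves only via a private slot */
page = gfn_to_page(kvm, gfn);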
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *slot;
struct page *page;
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ if (!kvm_is_visible_gfn(kvm, pgoff))
+ return NOPAGE_SIGBUS;
page = gfn_to_page(kvm, pgoff);
if (is_error_page(page)) {
kvm_release_page(page);
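The effect on userspace: a fault on a mapping that covers a private gfn now raises SIGBUS instead of handing back the page. A rough userspace illustration, assuming the era's model where guest memory is mmap()ed from the VM fd at the guest-physical offset; vm_fd and private_gpa are assumptions, not part of the patch:

#include <sys/mman.h>

/* Sketch: touching a gpa backed only by a private slot through an
 * mmap of the VM fd should now fault with SIGBUS. */
static void poke_private_gpa(int vm_fd, off_t private_gpa)
{
	char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, vm_fd, private_gpa);
	if (p != MAP_FAILED)
		(void)*(volatile char *)p;	/* expect SIGBUS here */
}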