			int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
-void kvm_release_page(struct page *page);
+void kvm_release_page_clean(struct page *page);
+void kvm_release_page_dirty(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
EXPORT_SYMBOL_GPL(gfn_to_page);
-void kvm_release_page(struct page *page)
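+/* Drop a reference to a guest page without marking it dirty. */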
+void kvm_release_page_clean(struct page *page)
+{
+	put_page(page);
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_clean);
+
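+/* Drop a reference to a guest page, marking it dirty unless it is reserved. */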
+void kvm_release_page_dirty(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
-EXPORT_SYMBOL_GPL(kvm_release_page);
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
static int next_segment(unsigned long len, int offset)
{
	/* current->mm->mmap_sem is already held so call lockless version */
	page = __gfn_to_page(kvm, pgoff);
	if (is_error_page(page)) {
-		kvm_release_page(page);
+		kvm_release_page_clean(page);
		return NOPAGE_SIGBUS;
	}
	if (type != NULL)
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *page;
+	struct page *release_page;
	unsigned long *rmapp;
	int i;
	if (!is_rmap_pte(*spte))
		return;
	page = page_header(__pa(spte));
-	kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
-			 PAGE_SHIFT));
+	release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
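+	/* A writable spte means the guest may have modified the page. */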
+	if (is_writeble_pte(*spte))
+		kvm_release_page_dirty(release_page);
+	else
+		kvm_release_page_clean(release_page);
	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;
+	struct page *page;
+	page = pfn_to_page(p >> PAGE_SHIFT);
	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
			pte = table[index];
			was_rmapped = is_rmap_pte(pte);
			if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
-				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
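+				/* Page already mapped writable; drop the extra reference. */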
+				kvm_release_page_clean(page);
				return 0;
			}
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			if (!was_rmapped)
				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
			else
-				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+				kvm_release_page_clean(page);
+
			return 0;
		}
						     1, 3, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
-				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+				kvm_release_page_clean(page);
				return -ENOMEM;
			}
	paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
	if (is_error_hpa(paddr)) {
-		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-				 >> PAGE_SHIFT));
+		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				       >> PAGE_SHIFT));
		return 1;
	}
" valid guest gva %lx\n", audit_msg, va);
page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
>> PAGE_SHIFT);
- kvm_release_page(page);
+ kvm_release_page_clean(page);
}
}
	if (is_error_hpa(paddr)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
-		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-				 >> PAGE_SHIFT));
+		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				       >> PAGE_SHIFT));
		return;
	}
		page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
				   >> PAGE_SHIFT);
-		kvm_release_page(page);
+		kvm_release_page_clean(page);
	}
}
	else
-		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-				 >> PAGE_SHIFT));
+		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+				       >> PAGE_SHIFT));
	if (!ptwrite || !*ptwrite)
		vcpu->last_pte_updated = shadow_pte;
}
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	kunmap_atomic(gpt, KM_USER0);
-	kvm_release_page(page);
+	kvm_release_page_clean(page);
}
#undef pt_element_t
	for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
		if (vcpu->pio.guest_pages[i]) {
-			kvm_release_page(vcpu->pio.guest_pages[i]);
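+			/* PIO may have written into these pages; release them dirty. */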
+			kvm_release_page_dirty(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}