/*
* Lock ordering in mm:
*
- * inode->i_sem (while writing or truncating, not reading or faulting)
+ * inode->i_mutex (while writing or truncating, not reading or faulting)
* inode->i_alloc_sem
*
* When a page fault occurs while writing from user to file, down_read
- * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
- * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
- * taken together; in truncation, i_sem is taken outermost.
+ * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
+ * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
+ * taken together; in truncation, i_mutex is taken outermost.
*
* mm->mmap_sem
* page->flags PG_locked (lock_page)
* mapping->i_mmap_lock
* anon_vma->lock
* mm->page_table_lock or pte_lock
- * zone->lru_lock (in mark_page_accessed)
+ * zone->lru_lock (in mark_page_accessed, isolate_lru_page)
* swap_lock (in swap_duplicate, swap_info_get)
* mmlist_lock (in mmput, drain_mmlist and others)
* mapping->private_lock (in __set_page_dirty_buffers)
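To make the ordering concrete, here is a minimal sketch of a path that takes three of the locks above in the documented order; the mm, page and mapping variables are assumed to be already set up, and the function is illustrative rather than taken from this file:

	/* hypothetical walk: mmap_sem -> PG_locked -> i_mmap_lock */
	down_read(&mm->mmap_sem);
	lock_page(page);			/* page->flags PG_locked */
	spin_lock(&mapping->i_mmap_lock);
	/* ... walk the vma prio tree under i_mmap_lock ... */
	spin_unlock(&mapping->i_mmap_lock);
	unlock_page(page);
	up_read(&mm->mmap_sem);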
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
+#include <linux/module.h>
#include <asm/tlbflush.h>
return anon_vma;
}
+#ifdef CONFIG_MIGRATION
+/*
+ * Remove an anonymous page from swap by replacing its swap ptes
+ * with real ptes pointing to valid pages, and then releasing
+ * the page from the swap cache.
+ *
+ * The caller must hold the lock on the page.
+ */
+void remove_from_swap(struct page *page)
+{
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *vma;
+
+ if (!PageAnon(page) || !PageSwapCache(page))
+ return;
+
+ anon_vma = page_lock_anon_vma(page);
+ if (!anon_vma)
+ return;
+
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
+ remove_vma_swap(vma, page);
+
+ spin_unlock(&anon_vma->lock);
+
+ delete_from_swap_cache(page);
+}
+EXPORT_SYMBOL(remove_from_swap);
+#endif
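As a usage sketch (the caller below is hypothetical; only the locking requirement comes from the comment above), a migration path would take the page lock before asking for the swap ptes to be re-established:

	lock_page(page);		/* required by remove_from_swap() */
	remove_from_swap(page);		/* no-op unless anon and in swap cache */
	unlock_page(page);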
+
/*
* At what user virtual address is page expected in vma?
*/
/*
* At what user virtual address is page expected in vma? checking that the
- * page matches the vma: currently only used by unuse_process, on anon pages.
+ * page matches the vma: currently only used on anon pages, by unuse_vma.
*/
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
(void *)page->mapping - PAGE_MAPPING_ANON)
return -EFAULT;
} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
- if (vma->vm_file->f_mapping != page->mapping)
+ if (!vma->vm_file ||
+ vma->vm_file->f_mapping != page->mapping)
return -EFAULT;
} else
return -EFAULT;
* repeatedly from either page_referenced_anon or page_referenced_file.
*/
static int page_referenced_one(struct page *page,
- struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
+ struct vm_area_struct *vma, unsigned int *mapcount)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
/*
 * Pretend the page is referenced if the task has the
 * swap token and is in the middle of a page fault.
 */
- if (mm != current->mm && !ignore_token && has_swap_token(mm) &&
+ if (mm != current->mm && has_swap_token(mm) &&
rwsem_is_locked(&mm->mmap_sem))
referenced++;
return referenced;
}
-static int page_referenced_anon(struct page *page, int ignore_token)
+static int page_referenced_anon(struct page *page)
{
unsigned int mapcount;
struct anon_vma *anon_vma;
mapcount = page_mapcount(page);
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- referenced += page_referenced_one(page, vma, &mapcount,
- ignore_token);
+ referenced += page_referenced_one(page, vma, &mapcount);
if (!mapcount)
break;
}
*
* This function is only called from page_referenced for object-based pages.
*/
-static int page_referenced_file(struct page *page, int ignore_token)
+static int page_referenced_file(struct page *page)
{
unsigned int mapcount;
struct address_space *mapping = page->mapping;
referenced++;
break;
}
- referenced += page_referenced_one(page, vma, &mapcount,
- ignore_token);
+ referenced += page_referenced_one(page, vma, &mapcount);
if (!mapcount)
break;
}
 * Quick test_and_clear_referenced for all mappings to a page;
 * returns the number of ptes which referenced the page.
*/
-int page_referenced(struct page *page, int is_locked, int ignore_token)
+int page_referenced(struct page *page, int is_locked)
{
int referenced = 0;
- if (!swap_token_default_timeout)
- ignore_token = 1;
-
if (page_test_and_clear_young(page))
referenced++;
if (page_mapped(page) && page->mapping) {
if (PageAnon(page))
- referenced += page_referenced_anon(page, ignore_token);
+ referenced += page_referenced_anon(page);
else if (is_locked)
- referenced += page_referenced_file(page, ignore_token);
+ referenced += page_referenced_file(page);
else if (TestSetPageLocked(page))
referenced++;
else {
if (page->mapping)
- referenced += page_referenced_file(page,
- ignore_token);
+ referenced += page_referenced_file(page);
unlock_page(page);
}
}
return referenced;
}
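For orientation, a sketch of how a reclaim loop might consume this return value; the label is hypothetical, and the call assumes the page is already locked, hence is_locked == 1:

	if (page_referenced(page, 1))
		goto activate_locked;	/* page is in use: keep it */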
+/**
+ * __page_set_anon_rmap - set up new anonymous rmap
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_set_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ struct anon_vma *anon_vma = vma->anon_vma;
+
+ BUG_ON(!anon_vma);
+ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+ page->mapping = (struct address_space *) anon_vma;
+
+ page->index = linear_page_index(vma, address);
+
+ /*
+ * nr_mapped state can be updated without turning off
+ * interrupts because it is not modified via interrupt.
+ */
+ __inc_page_state(nr_mapped);
+}
+
/**
* page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- if (atomic_inc_and_test(&page->_mapcount)) {
- struct anon_vma *anon_vma = vma->anon_vma;
-
- BUG_ON(!anon_vma);
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- page->mapping = (struct address_space *) anon_vma;
-
- page->index = linear_page_index(vma, address);
-
- inc_page_state(nr_mapped);
- }
+ if (atomic_inc_and_test(&page->_mapcount))
+ __page_set_anon_rmap(page, vma, address);
/* else checking page index and mapping is racy */
}
+/**
+ * page_add_new_anon_rmap - add pte mapping to a new anonymous page
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ *
+ * Same as page_add_anon_rmap but must only be called on *new* pages.
+ * This means the inc-and-test can be bypassed.
+ */
+void page_add_new_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+ __page_set_anon_rmap(page, vma, address);
+}
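A sketch of the intended kind of call site, modeled on an anonymous page fault (the surrounding variables and ordering are assumptions, not part of this patch): the page was only just allocated, so nothing else can be manipulating its mapcount, and the plain atomic_set() above is safe:

	/* page is freshly allocated and not yet visible to others */
	lru_cache_add_active(page);
	page_add_new_anon_rmap(page, vma, address);
	set_pte_at(mm, address, page_table, entry);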
+
/**
* page_add_file_rmap - add pte mapping to a file page
* @page: the page to add the mapping to
BUG_ON(!pfn_valid(page_to_pfn(page)));
if (atomic_inc_and_test(&page->_mapcount))
- inc_page_state(nr_mapped);
+ __inc_page_state(nr_mapped);
}
/**
void page_remove_rmap(struct page *page)
{
if (atomic_add_negative(-1, &page->_mapcount)) {
+ if (page_mapcount(page) < 0) {
+ printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
+ printk(KERN_EMERG " page->flags = %lx\n", page->flags);
+ printk(KERN_EMERG " page->count = %x\n", page_count(page));
+ printk(KERN_EMERG " page->mapping = %p\n", page->mapping);
+ }
+
BUG_ON(page_mapcount(page) < 0);
/*
* It would be tidy to reset the PageAnon mapping here,
*/
if (page_test_and_clear_dirty(page))
set_page_dirty(page);
- dec_page_state(nr_mapped);
+ __dec_page_state(nr_mapped);
}
}
* Subfunctions of try_to_unmap: try_to_unmap_one called
* repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
+static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ int ignore_refs)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
* If the page is mlock()d, we cannot swap it out.
* If it's recently referenced (perhaps page_referenced
* skipped over this mm) then we should reactivate it.
- *
- * Pages belonging to VM_RESERVED regions should not happen here.
*/
- if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
- ptep_clear_flush_young(vma, address, pte)) {
+ if ((vma->vm_flags & VM_LOCKED) ||
+ (ptep_clear_flush_young(vma, address, pte)
+ && !ignore_refs)) {
ret = SWAP_FAIL;
goto out_unmap;
}
struct page *page;
unsigned long address;
unsigned long end;
- unsigned long pfn;
address = (vma->vm_start + cursor) & CLUSTER_MASK;
end = address + CLUSTER_SIZE;
for (; address < end; pte++, address += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
-
- pfn = pte_pfn(*pte);
- if (unlikely(!pfn_valid(pfn))) {
- print_bad_pte(vma, *pte, address);
- continue;
- }
-
- page = pfn_to_page(pfn);
- BUG_ON(PageAnon(page));
+ page = vm_normal_page(vma, address, *pte);
+ BUG_ON(!page || PageAnon(page));
if (ptep_clear_flush_young(vma, address, pte))
continue;
/* Nuke the page table entry. */
- flush_cache_page(vma, address, pfn);
+ flush_cache_page(vma, address, pte_pfn(*pte));
pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
pte_unmap_unlock(pte - 1, ptl);
}
-static int try_to_unmap_anon(struct page *page)
+static int try_to_unmap_anon(struct page *page, int ignore_refs)
{
struct anon_vma *anon_vma;
struct vm_area_struct *vma;
return ret;
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- ret = try_to_unmap_one(page, vma);
+ ret = try_to_unmap_one(page, vma, ignore_refs);
if (ret == SWAP_FAIL || !page_mapped(page))
break;
}
*
* This function is only called from try_to_unmap for object-based pages.
*/
-static int try_to_unmap_file(struct page *page)
+static int try_to_unmap_file(struct page *page, int ignore_refs)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- ret = try_to_unmap_one(page, vma);
+ ret = try_to_unmap_one(page, vma, ignore_refs);
if (ret == SWAP_FAIL || !page_mapped(page))
goto out;
}
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
+ if (vma->vm_flags & VM_LOCKED)
continue;
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
do {
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
+ if (vma->vm_flags & VM_LOCKED)
continue;
cursor = (unsigned long) vma->vm_private_data;
while (cursor < max_nl_cursor &&
- * in locked vmas). Reset cursor on all unreserved nonlinear
- * vmas, now forgetting on which ones it had fallen behind.
+ * in locked vmas). Reset cursor on all nonlinear vmas, now
+ * forgetting on which ones it had fallen behind.
*/
- list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
- shared.vm_set.list) {
- if (!(vma->vm_flags & VM_RESERVED))
- vma->vm_private_data = NULL;
- }
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
+ vma->vm_private_data = NULL;
out:
spin_unlock(&mapping->i_mmap_lock);
return ret;
* SWAP_AGAIN - we missed a mapping, try again later
* SWAP_FAIL - the page is unswappable
*/
-int try_to_unmap(struct page *page)
+int try_to_unmap(struct page *page, int ignore_refs)
{
int ret;
BUG_ON(!PageLocked(page));
if (PageAnon(page))
- ret = try_to_unmap_anon(page);
+ ret = try_to_unmap_anon(page, ignore_refs);
else
- ret = try_to_unmap_file(page);
+ ret = try_to_unmap_file(page, ignore_refs);
if (!page_mapped(page))
ret = SWAP_SUCCESS;
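To show what the new ignore_refs flag is for, two hedged caller sketches (the label and retry loop are illustrative): reclaim passes 0 so that a recently referenced page fails with SWAP_FAIL and stays mapped, while page migration passes 1 because the page must be unmapped even if it is in active use:

	/* reclaim: respect young ptes */
	if (try_to_unmap(page, 0) != SWAP_SUCCESS)
		goto keep_locked;

	/* migration: unmap regardless of references */
	while (page_mapped(page))
		if (try_to_unmap(page, 1) == SWAP_FAIL)
			break;		/* e.g. an mlocked vma: give up */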