X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Frmap.c;h=dbc2ca2057a54ff2c4a709de4b6e9a01694241bb;hb=980110c5da56cb56d3356f5a5251fdc920f83ba6;hp=304f51985c78504cb67056922bfd2bb7c34a6e21;hpb=5c34202b8bf942da411b6599668a76b07449bbfd;p=linux-2.6
diff --git a/mm/rmap.c b/mm/rmap.c
index 304f51985c..dbc2ca2057 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -36,6 +36,7 @@
  *     mapping->tree_lock (widely used, in set_page_dirty,
  *                in arch-dependent flush_dcache_mmap_lock,
  *                within inode_lock in __sync_single_inode)
+ *     zone->lock (within radix tree node alloc)
  */
 
 #include <linux/mm.h>
@@ -53,24 +54,6 @@
 
 struct kmem_cache *anon_vma_cachep;
 
-static inline void validate_anon_vma(struct vm_area_struct *find_vma)
-{
-#ifdef CONFIG_DEBUG_VM
-	struct anon_vma *anon_vma = find_vma->anon_vma;
-	struct vm_area_struct *vma;
-	unsigned int mapcount = 0;
-	int found = 0;
-
-	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		mapcount++;
-		BUG_ON(mapcount > 100000);
-		if (vma == find_vma)
-			found = 1;
-	}
-	BUG_ON(!found);
-#endif
-}
-
 /* This must be called under the mmap_sem. */
 int anon_vma_prepare(struct vm_area_struct *vma)
 {
@@ -121,10 +104,8 @@ void __anon_vma_link(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
-	if (anon_vma) {
+	if (anon_vma)
 		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
-		validate_anon_vma(vma);
-	}
 }
 
 void anon_vma_link(struct vm_area_struct *vma)
@@ -134,7 +115,6 @@ void anon_vma_link(struct vm_area_struct *vma)
 	if (anon_vma) {
 		spin_lock(&anon_vma->lock);
 		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
-		validate_anon_vma(vma);
 		spin_unlock(&anon_vma->lock);
 	}
 }
@@ -148,7 +128,6 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 		return;
 
 	spin_lock(&anon_vma->lock);
-	validate_anon_vma(vma);
 	list_del(&vma->anon_vma_node);
 
 	/* We must garbage collect the anon_vma if it's empty */
@@ -159,21 +138,18 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 		anon_vma_free(anon_vma);
 }
 
-static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
-			  unsigned long flags)
+static void anon_vma_ctor(struct kmem_cache *cachep, void *data)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		struct anon_vma *anon_vma = data;
+	struct anon_vma *anon_vma = data;
 
-		spin_lock_init(&anon_vma->lock);
-		INIT_LIST_HEAD(&anon_vma->head);
-	}
+	spin_lock_init(&anon_vma->lock);
+	INIT_LIST_HEAD(&anon_vma->head);
 }
 
 void __init anon_vma_init(void)
 {
 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
-			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
+			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
 }
 
 /*
@@ -207,7 +183,9 @@ static void page_unlock_anon_vma(struct anon_vma *anon_vma)
 }
 
 /*
- * At what user virtual address is page expected in vma?
+ * At what user virtual address is page expected in @vma?
+ * Returns virtual address or -EFAULT if page's index/offset is not
+ * within the range mapped by @vma.
  */
 static inline unsigned long
 vma_address(struct page *page, struct vm_area_struct *vma)
@@ -217,8 +195,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-		/* page should be within any vma from prio_tree_next */
-		BUG_ON(!PageAnon(page));
+		/* page should be within @vma mapping range */
 		return -EFAULT;
 	}
 	return address;
 }
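The vma_address() arithmetic in the hunk above is easiest to see with concrete numbers. Below is a minimal userspace sketch, not kernel code: the toy_* names, the fixed 4K PAGE_SHIFT, and the hard-coded -EFAULT value (14) are illustrative assumptions, but the address computation and range check mirror the function as patched.

/*
 * Toy model of vma_address(): a file page at index 'pgoff' maps to
 * vm_start + (pgoff - vm_pgoff) << PAGE_SHIFT; anything that lands
 * outside [vm_start, vm_end) is reported as -EFAULT.
 */
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumes 4K pages */

struct toy_vma {
	unsigned long vm_start, vm_end;	/* virtual range of the mapping */
	unsigned long vm_pgoff;		/* file offset of vm_start, in pages */
};

static unsigned long toy_vma_address(unsigned long pgoff, struct toy_vma *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (address < vma->vm_start || address >= vma->vm_end)
		return (unsigned long)-14;	/* -EFAULT, as in the kernel */
	return address;
}

int main(void)
{
	struct toy_vma vma = { 0x400000, 0x500000, 16 };	/* maps pages 16..271 */

	printf("%lx\n", toy_vma_address(20, &vma));	/* 0x404000: inside */
	printf("%lx\n", toy_vma_address(4, &vma));	/* wraps below vm_start: -EFAULT */
	return 0;
}

The unsigned wrap-around in the second call is exactly why the kernel checks both ends of the range rather than only address >= vma->vm_end.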
@@ -460,7 +437,6 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 		entry = pte_wrprotect(entry);
 		entry = pte_mkclean(entry);
 		set_pte_at(mm, address, pte, entry);
-		lazy_mmu_prot_update(entry);
 		ret = 1;
 	}
 
@@ -495,11 +471,12 @@ int page_mkclean(struct page *page)
 
 	if (page_mapped(page)) {
 		struct address_space *mapping = page_mapping(page);
-		if (mapping)
+		if (mapping) {
 			ret = page_mkclean_file(mapping, page);
-		if (page_test_dirty(page)) {
-			page_clear_dirty(page);
-			ret = 1;
+			if (page_test_dirty(page)) {
+				page_clear_dirty(page);
+				ret = 1;
+			}
 		}
 	}
 
@@ -531,20 +508,52 @@ static void __page_set_anon_rmap(struct page *page,
 	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
+/**
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is added
+ * @address:	the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be set up.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
 /**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }
 
 /*
@@ -555,10 +564,12 @@ void page_add_anon_rmap(struct page *page,
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
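The new __page_check_anon_rmap() leans on the long-standing convention that an anonymous page stores its anon_vma in page->mapping with the low PAGE_MAPPING_ANON bit set, so one field serves both file and anon pages. A hedged userspace sketch of that pointer-tagging round trip follows; the toy_* names are invented for illustration, and PAGE_MAPPING_ANON is assumed to be 1, its value in this era.

/*
 * Toy model of the encode step mirrored by page_add_anon_rmap()'s setup
 * path and the decode/check step mirrored by the two BUG_ON()s in
 * __page_check_anon_rmap(). Struct alignment guarantees the low bit of
 * a real pointer is clear, which is what makes the tag bit safe.
 */
#include <assert.h>
#include <stdint.h>

#define TOY_MAPPING_ANON 1UL	/* low bit tags an anon_vma pointer */

struct toy_anon_vma { int dummy; };

/* Encode: anon_vma pointer plus tag bit, stored as a generic mapping. */
static void *toy_anon_encode(struct toy_anon_vma *av)
{
	return (void *)((uintptr_t)av + TOY_MAPPING_ANON);
}

/* Decode: verify the tag (PageAnon), then strip it back off. */
static struct toy_anon_vma *toy_anon_decode(void *mapping)
{
	assert((uintptr_t)mapping & TOY_MAPPING_ANON);
	return (struct toy_anon_vma *)((uintptr_t)mapping - TOY_MAPPING_ANON);
}

int main(void)
{
	struct toy_anon_vma av;
	void *mapping = toy_anon_encode(&av);

	assert(toy_anon_decode(mapping) == &av);	/* round-trips cleanly */
	return 0;
}

This is why the check function can compare page->mapping directly against the tagged vma->anon_vma value instead of decoding anything.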
@@ -575,6 +586,28 @@ void page_add_file_rmap(struct page *page)
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is duplicated
+ * @address:	the user virtual address mapped
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page:	page to remove mapping from
@@ -591,8 +624,10 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 		printk (KERN_EMERG "  page->count = %x\n", page_count(page));
 		printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
 		print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
-		if (vma->vm_ops)
+		if (vma->vm_ops) {
 			print_symbol (KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
+			print_symbol (KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
+		}
 		if (vma->vm_file && vma->vm_file->f_op)
 			print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
 		BUG();
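page_dup_rmap() and the inc-and-test in page_add_anon_rmap() both rely on the _mapcount convention noted in the "starts at -1" comment earlier in the patch: the count is -1 for an unmapped page, so the first increment, and only the first, lands on zero. A small userspace sketch of that protocol follows; toy_inc_and_test() is an invented stand-in for the kernel's atomic_inc_and_test(), using a plain int instead of an atomic_t.

/*
 * Toy model of the _mapcount lifecycle: -1 means unmapped, the first
 * mapper takes the "set" path, every later mapper (fork's page_dup_rmap,
 * additional ptes) takes the "check" path.
 */
#include <assert.h>

/* Mirrors atomic_inc_and_test(): increment, report whether result is 0. */
static int toy_inc_and_test(int *v)
{
	return ++*v == 0;
}

int main(void)
{
	int mapcount = -1;			/* freshly allocated page: unmapped */

	assert(toy_inc_and_test(&mapcount));	/* first pte: __page_set_anon_rmap */
	assert(!toy_inc_and_test(&mapcount));	/* later ptes: __page_check_anon_rmap */
	assert(mapcount == 1);			/* two mappings: page_mapcount() is _mapcount + 1 */
	return 0;
}

The BUG_ON(page_mapcount(page) == 0) in page_dup_rmap() is the same convention read backwards: you can only duplicate a mapping that already exists, so the count must be past -1 before the increment.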