X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fswapfile.c;h=60cd24a55204efc5c84956443d02f9806bd66a9d;hb=661299d9d0437a0ff72240f3d60016ac3a361a6e;hp=a60e0075d55bd4d5692a5b39f146a7cedffe51e7;hpb=1da177e4c3f41524e886b7f1b8a0c1fc7321cac2;p=linux-2.6

diff --git a/mm/swapfile.c b/mm/swapfile.c
index a60e0075d5..60cd24a552 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -79,7 +79,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 		WARN_ON(page_count(page) <= 1);
 
 		bdi = bdev->bd_inode->i_mapping->backing_dev_info;
-		bdi->unplug_io_fn(bdi, page);
+		blk_run_backing_dev(bdi, page);
 	}
 	up_read(&swap_unplug_sem);
 }
@@ -276,61 +276,37 @@ void swap_free(swp_entry_t entry)
 }
 
 /*
- * Check if we're the only user of a swap page,
- * when the page is locked.
+ * How many references to page are currently swapped out?
  */
-static int exclusive_swap_page(struct page *page)
+static inline int page_swapcount(struct page *page)
 {
-	int retval = 0;
-	struct swap_info_struct * p;
+	int count = 0;
+	struct swap_info_struct *p;
 	swp_entry_t entry;
 
 	entry.val = page->private;
 	p = swap_info_get(entry);
 	if (p) {
-		/* Is the only swap cache user the cache itself? */
-		if (p->swap_map[swp_offset(entry)] == 1) {
-			/* Recheck the page count with the swapcache lock held.. */
-			write_lock_irq(&swapper_space.tree_lock);
-			if (page_count(page) == 2)
-				retval = 1;
-			write_unlock_irq(&swapper_space.tree_lock);
-		}
+		/* Subtract the 1 for the swap cache itself */
+		count = p->swap_map[swp_offset(entry)] - 1;
 		swap_info_put(p);
 	}
-	return retval;
+	return count;
 }
 
 /*
  * We can use this swap cache entry directly
  * if there are no other references to it.
- *
- * Here "exclusive_swap_page()" does the real
- * work, but we opportunistically check whether
- * we need to get all the locks first..
  */
 int can_share_swap_page(struct page *page)
 {
-	int retval = 0;
+	int count;
 
-	if (!PageLocked(page))
-		BUG();
-	switch (page_count(page)) {
-	case 3:
-		if (!PagePrivate(page))
-			break;
-		/* Fallthrough */
-	case 2:
-		if (!PageSwapCache(page))
-			break;
-		retval = exclusive_swap_page(page);
-		break;
-	case 1:
-		if (PageReserved(page))
-			break;
-		retval = 1;
-	}
-	return retval;
+	BUG_ON(!PageLocked(page));
+	count = page_mapcount(page);
+	if (count <= 1 && PageSwapCache(page))
+		count += page_swapcount(page);
+	return count == 1;
 }
 
 /*
@@ -529,9 +505,10 @@ static int unuse_mm(struct mm_struct *mm,
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		/*
-		 * Our reference to the page stops try_to_unmap_one from
-		 * unmapping its ptes, so swapoff can make progress.
+		 * Activate page so shrink_cache is unlikely to unmap its
+		 * ptes while lock is dropped, so swapoff can make progress.
 		 */
+		activate_page(page);
 		unlock_page(page);
 		down_read(&mm->mmap_sem);
 		lock_page(page);
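
Notes on the three hunks, with standalone sketches (editorial
illustrations, not part of the patch):

1. The first hunk stops invoking the backing device's unplug hook through
the raw function pointer and routes it through blk_run_backing_dev()
instead, giving the block layer one guarded call site to check and
evolve. A minimal sketch of that shape -- mock_bdi and
mock_run_backing_dev are invented names, not kernel API:

#include <stddef.h>

struct mock_bdi {
	void (*unplug_io_fn)(struct mock_bdi *bdi, void *data);
};

/*
 * One wrapper to harden, instead of every call site dereferencing
 * the function pointer directly.
 */
static void mock_run_backing_dev(struct mock_bdi *bdi, void *data)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, data);
}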
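
2. The second hunk replaces the page_count() case analysis in
can_share_swap_page() with explicit reference accounting: page_mapcount()
counts the ptes mapping the page, and the new page_swapcount() counts
swap references beyond the swap cache's own (hence the "- 1" on the
swap_map entry). The page is exclusively ours exactly when the two sum
to 1. A userspace model of that rule, with invented mock_* names (this
is not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_page {
	int mapcount;		/* ptes mapping the page, cf. page_mapcount() */
	bool in_swapcache;	/* page has a swap cache slot                 */
	int swap_map;		/* swap_map[] entry: cache ref + other refs   */
};

/* Mirrors page_swapcount(): swap references minus the cache's own. */
static int mock_page_swapcount(const struct mock_page *p)
{
	return p->in_swapcache ? p->swap_map - 1 : 0;
}

/* Mirrors the rewritten can_share_swap_page(). */
static bool mock_can_share(const struct mock_page *p)
{
	int count = p->mapcount;

	if (count <= 1 && p->in_swapcache)
		count += mock_page_swapcount(p);
	return count == 1;
}

int main(void)
{
	/* Mapped once; the swap slot is held only by the cache: exclusive. */
	struct mock_page a = { .mapcount = 1, .in_swapcache = true, .swap_map = 1 };
	/* Unmapped here, but other tasks still hold swap references. */
	struct mock_page b = { .mapcount = 0, .in_swapcache = true, .swap_map = 3 };

	assert(mock_can_share(&a));
	assert(!mock_can_share(&b));
	printf("a exclusive: %d, b exclusive: %d\n",
	       mock_can_share(&a), mock_can_share(&b));
	return 0;
}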
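
3. The third hunk is about lock ordering in unuse_mm(): the caller
already holds the page lock but must take mmap_sem, so when the trylock
fails it drops the page lock, blocks on mmap_sem, and relocks the page.
The added activate_page() moves the page to the active list first, so
reclaim is less likely to unmap its ptes during the window in which the
page is unlocked. The same back-off shape transplanted to pthreads --
the lock names are invented for illustration:

#include <pthread.h>

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER; /* mmap_sem  */
static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER; /* page lock */

/*
 * Caller holds inner_lock; the canonical order is outer before inner.
 * On contention, back off rather than block in the wrong order.
 */
static void lock_outer_while_holding_inner(void)
{
	if (pthread_mutex_trylock(&outer_lock) != 0) {
		/*
		 * Drop the inner lock so we can sleep on the outer one
		 * without inverting the lock order, then reacquire it.
		 * Whatever the inner lock protected may have changed
		 * meanwhile and must be revalidated -- the patch calls
		 * activate_page() before this window precisely to shrink
		 * the chance of losing the page to reclaim.
		 */
		pthread_mutex_unlock(&inner_lock);
		pthread_mutex_lock(&outer_lock);
		pthread_mutex_lock(&inner_lock);
	}
}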