X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmigrate.c;h=06d0877a66efa74a3d599b4ea50f0c5100dd7a1f;hb=6af2acb6619688046039234f716fd003e6ed2b3f;hp=c8d87221f36809785cb58266a711b9f5a1b0df7f;hpb=dc386d4d1e98bb39fb967ee156cd456c802fc692;p=linux-2.6

diff --git a/mm/migrate.c b/mm/migrate.c
index c8d87221f3..06d0877a66 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -49,9 +49,8 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
 		struct zone *zone = page_zone(page);
 
 		spin_lock_irq(&zone->lru_lock);
-		if (PageLRU(page)) {
+		if (PageLRU(page) && get_page_unless_zero(page)) {
 			ret = 0;
-			get_page(page);
 			ClearPageLRU(page);
 			if (PageActive(page))
 				del_page_from_active_list(zone, page);
@@ -172,6 +171,7 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 	if (is_write_migration_entry(entry))
 		pte = pte_mkwrite(pte);
+	flush_cache_page(vma, addr, pte_pfn(pte));
 	set_pte_at(mm, addr, ptep, pte);
 
 	if (PageAnon(new))
@@ -181,7 +181,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, addr, pte);
-	lazy_mmu_prot_update(pte);
 
 out:
 	pte_unmap_unlock(ptep, ptl);
@@ -612,6 +611,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int rc = 0;
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
+	int rcu_locked = 0;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -637,8 +637,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	 * we cannot notice that anon_vma is freed while we migrate a page.
 	 * This rcu_read_lock() delays freeing anon_vma pointer until the end
 	 * of migration. File cache pages are no problem because of page_lock()
+	 * File cache pages may use write_page() or lock_page() during
+	 * migration, so only anonymous pages need this care here.
 	 */
-	rcu_read_lock();
+	if (PageAnon(page)) {
+		rcu_read_lock();
+		rcu_locked = 1;
+	}
 	/*
 	 * This is a corner case handling.
 	 * When a new swap-cache is read into, it is linked to LRU
@@ -657,7 +662,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 
 	if (rc)
 		remove_migration_ptes(page, page);
 rcu_unlock:
-	rcu_read_unlock();
+	if (rcu_locked)
+		rcu_read_unlock();
 
 unlock:
@@ -966,7 +972,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 	 * array. Return various errors if the user did something wrong.
 	 */
 	for (i = 0; i < nr_pages; i++) {
-		const void *p;
+		const void __user *p;
 
 		err = -EFAULT;
 		if (get_user(p, pages + i))
@@ -980,7 +986,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 			goto out;
 
 		err = -ENODEV;
-		if (!node_online(node))
+		if (!node_state(node, N_HIGH_MEMORY))
 			goto out;
 
 		err = -EACCES;
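
Two of the changes above are instances of patterns worth spelling out.

The isolate_lru_page() hunk replaces an unconditional get_page() with
get_page_unless_zero(). The point is a race: between the PageLRU() test
and the reference grab, the last reference may be dropped and the page
may already be headed for the free path; bumping the count up from zero
would "resurrect" a dying page. get_page_unless_zero() refuses to take
a reference once the count has reached zero. Below is a minimal
userspace analogue of that pattern, assuming C11 atomics stand in for
the kernel's atomic ops; struct obj and obj_get_unless_zero() are
made-up names, not kernel API.

/*
 * Sketch only: userspace analogue of the get_page_unless_zero()
 * pattern.  All names here are hypothetical.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int refcount;		/* 0 means "being freed" */
};

/* Take a reference only if the count is still positive. */
static bool obj_get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0) {
		/*
		 * The CAS succeeds only while the count has not hit
		 * zero; on failure, old is reloaded and retested.
		 */
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;
	}
	return false;			/* too late: do not touch the object */
}

int main(void)
{
	struct obj live = { .refcount = 1 };
	struct obj dying = { .refcount = 0 };

	assert(obj_get_unless_zero(&live));	/* refcount 1 -> 2 */
	assert(!obj_get_unless_zero(&dying));	/* stays 0, no resurrection */
	return 0;
}

The weak compare-and-swap may fail spuriously, but every failure
reloads old, so the function can only return false after genuinely
observing a zero count.

The unmap_and_move() hunks show a second pattern: take a lock
conditionally (here rcu_read_lock(), only for PageAnon pages, since an
anon_vma may otherwise be freed in the middle of migration), record
that fact in a flag, and test the flag on the shared exit path so every
lock has a matching unlock. A sketch of the same bookkeeping, assuming
a pthread mutex as a stand-in for the RCU read lock; process_item() and
guard are made-up names.

/*
 * Sketch only: conditional lock plus flag, mirroring rcu_locked in
 * the patch above.  All names here are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;

static int process_item(bool needs_guard)
{
	int rc = 0;
	bool locked = false;		/* mirrors rcu_locked */

	if (needs_guard) {
		pthread_mutex_lock(&guard);
		locked = true;
	}

	/* ... work that may bail out early to the common exit ... */

	/* single exit path: undo only what was actually taken */
	if (locked)
		pthread_mutex_unlock(&guard);
	return rc;
}

Keeping one exit path with flag-guarded teardown, rather than
duplicating unlock calls at every early return, is what lets the patch
add the PageAnon() condition without touching the error paths.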