X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmigrate.c;h=449d77d409f52622bbf825b7fd17d43df7ff5fbf;hb=a442ac512f36981182e66a427ad05f449ff6593b;hp=07f22d4a431f5ce63303632749453e9459ece1ee;hpb=541010e4b8921cd781ff02ae68028501457045b6;p=linux-2.6

diff --git a/mm/migrate.c b/mm/migrate.c
index 07f22d4a43..449d77d409 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/mm_inline.h>
+#include <linux/nsproxy.h>
 #include <linux/pagevec.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -28,6 +29,7 @@
 #include <linux/mempolicy.h>
 #include <linux/vmalloc.h>
 #include <linux/security.h>
+#include <linux/memcontrol.h>
 
 #include "internal.h"
 
@@ -114,11 +116,6 @@ int putback_lru_pages(struct list_head *l)
 	return count;
 }
 
-static inline int is_swap_pte(pte_t pte)
-{
-	return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
-}
-
 /*
  * Restore a potential migration pte to a working pte entry
  */
@@ -167,10 +164,25 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
 		goto out;
 
+	/*
+	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
+	 * Failure is not an option here: we're now expected to remove every
+	 * migration pte, and will cause crashes otherwise. Normally this
+	 * is not an issue: mem_cgroup_prepare_migration bumped up the old
+	 * page_cgroup count for safety, that's now attached to the new page,
+	 * so this charge should just be another increment of the count,
+	 * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
+	 * there's been a force_empty, those reference counts may no longer
+	 * be reliable, and this charge can actually fail: oh well, we don't
+	 * make the situation any worse by proceeding as if it had succeeded.
+	 */
+	mem_cgroup_charge(new, mm, GFP_ATOMIC);
+
 	get_page(new);
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 	if (is_write_migration_entry(entry))
 		pte = pte_mkwrite(pte);
+	flush_cache_page(vma, addr, pte_pfn(pte));
 	set_pte_at(mm, addr, ptep, pte);
 
 	if (PageAnon(new))
@@ -180,7 +192,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, addr, pte);
-	lazy_mmu_prot_update(pte);
 
 out:
 	pte_unmap_unlock(ptep, ptl);
@@ -372,7 +383,14 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
 
 	if (PageDirty(page)) {
 		clear_page_dirty_for_io(page);
-		set_page_dirty(newpage);
+		/*
+		 * Want to mark the page and the radix tree as dirty, and
+		 * redo the accounting that clear_page_dirty_for_io undid,
+		 * but we can't use set_page_dirty because that function
+		 * is actually a signal that all of the page has become dirty,
+		 * whereas only part of our page may be dirty.
+		 */
+		__set_page_dirty_nobuffers(newpage);
 	}
 
 #ifdef CONFIG_SWAP
@@ -591,9 +609,10 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	else
 		rc = fallback_migrate_page(mapping, newpage, page);
 
-	if (!rc)
+	if (!rc) {
+		mem_cgroup_page_migration(page, newpage);
 		remove_migration_ptes(page, newpage);
-	else
+	} else
 		newpage->mapping = NULL;
 
 	unlock_page(newpage);
@@ -612,6 +631,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
 	int rcu_locked = 0;
+	int charge = 0;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -644,23 +664,46 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		rcu_read_lock();
 		rcu_locked = 1;
 	}
+
 	/*
-	 * This is a corner case handling.
-	 * When a new swap-cache is read into, it is linked to LRU
-	 * and treated as swapcache but has no rmap yet.
-	 * Calling try_to_unmap() against a page->mapping==NULL page is
-	 * BUG. So handle it here.
+	 * Corner case handling:
+	 * 1. When a new swap-cache page is read in, it is added to the LRU
+	 * and treated as swapcache but it has no rmap yet.
+	 * Calling try_to_unmap() against a page->mapping==NULL page will
+	 * trigger a BUG. So handle it here.
+	 * 2. An orphaned page (see truncate_complete_page) might have
+	 * fs-private metadata. The page can be picked up due to memory
+	 * offlining. Everywhere else except page reclaim, the page is
+	 * invisible to the vm, so the page cannot be migrated. So try to
+	 * free the metadata, so that the page can be freed.
 	 */
-	if (!page->mapping)
+	if (!page->mapping) {
+		if (!PageAnon(page) && PagePrivate(page)) {
+			/*
+			 * Go direct to try_to_free_buffers() here because
+			 * a) that's what try_to_release_page() would do anyway
+			 * b) we may be under rcu_read_lock() here, so we can't
+			 * use GFP_KERNEL which is what try_to_release_page()
+			 * needs to be effective.
+			 */
+			try_to_free_buffers(page);
+		}
 		goto rcu_unlock;
+	}
+
+	charge = mem_cgroup_prepare_migration(page);
 
 	/* Establish migration ptes or remove ptes */
 	try_to_unmap(page, 1);
 
 	if (!page_mapped(page))
 		rc = move_to_new_page(newpage, page);
 
-	if (rc)
+	if (rc) {
 		remove_migration_ptes(page, page);
+		if (charge)
+			mem_cgroup_end_migration(page);
+	} else if (charge)
+		mem_cgroup_end_migration(newpage);
 
 rcu_unlock:
 	if (rcu_locked)
 		rcu_read_unlock();
@@ -705,7 +748,7 @@ move_newpage:
  * The function returns after 10 attempts or if no pages
  * are movable anymore because to has become empty
  * or no retryable pages exist anymore. All pages will be
- * retruned to the LRU or freed.
+ * returned to the LRU or freed.
  *
  * Return: Number of pages not migrated or error code.
  */
@@ -924,7 +967,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 
 	/* Find the mm_struct */
 	read_lock(&tasklist_lock);
-	task = pid ? find_task_by_pid(pid) : current;
+	task = pid ? find_task_by_vpid(pid) : current;
 	if (!task) {
 		read_unlock(&tasklist_lock);
 		return -ESRCH;
@@ -986,7 +1029,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 			goto out;
 
 		err = -ENODEV;
-		if (!node_online(node))
+		if (!node_state(node, N_HIGH_MEMORY))
 			goto out;
 
 		err = -EACCES;
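
The subtlest part of this patch is the memcgroup pairing it threads through
unmap_and_move(): mem_cgroup_prepare_migration() pins the old page's charge
before the ptes are replaced, move_to_new_page() hands the page_cgroup over
via mem_cgroup_page_migration() on success, and mem_cgroup_end_migration()
drops the pin on whichever page ends up holding the charge. The sketch below
is a minimal userspace model of that bookkeeping only, not kernel code: the
struct page, the charge counters, and unmap_and_move_model() are stand-ins
invented for illustration, and the GFP_ATOMIC mem_cgroup_charge() done later
in remove_migration_pte() is deliberately left out.

/* Illustrative model only; all types and helpers here are stand-ins. */
#include <stdio.h>
#include <errno.h>

struct page { int charged; };	/* fake: count of memcg references held */

static int mem_cgroup_prepare_migration(struct page *p)
{
	p->charged++;		/* extra ref so the charge can't vanish mid-move */
	return 1;		/* the "charge" flag used by the patch */
}

static void mem_cgroup_page_migration(struct page *old, struct page *new)
{
	new->charged = old->charged;	/* page_cgroup reattached to new page */
	old->charged = 0;
}

static void mem_cgroup_end_migration(struct page *p)
{
	p->charged--;		/* drop the extra ref taken by prepare */
}

/* Mirrors the success/failure branches the patch adds to unmap_and_move(). */
static void unmap_and_move_model(struct page *old, struct page *new, int rc)
{
	int charge = mem_cgroup_prepare_migration(old);

	if (rc == 0)		/* move_to_new_page() succeeded */
		mem_cgroup_page_migration(old, new);

	if (rc) {
		if (charge)
			mem_cgroup_end_migration(old);
	} else if (charge)
		mem_cgroup_end_migration(new);

	printf("rc=%d: old page holds %d, new page holds %d\n",
	       rc, old->charged, new->charged);
}

int main(void)
{
	struct page old1 = { 1 }, new1 = { 0 };	/* base charge on old page */
	struct page old2 = { 1 }, new2 = { 0 };

	unmap_and_move_model(&old1, &new1, 0);		/* success: charge moves */
	unmap_and_move_model(&old2, &new2, -EAGAIN);	/* failure: charge stays */
	return 0;
}

On both paths each page ends up holding exactly its base charge, which is the
invariant the long comment added to remove_migration_pte() is defending.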