diff --git a/mm/migrate.c b/mm/migrate.c
index 251a8d158257a39fd87c1aa26f6f13f758048715..e2fdbce1874b4ff7a481621b6c5a3285d8d75260 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -25,6 +25,9 @@
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/writeback.h>
+#include <linux/mempolicy.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
 
 #include "internal.h"
 
@@ -46,9 +49,8 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
                struct zone *zone = page_zone(page);
 
                spin_lock_irq(&zone->lru_lock);
-               if (PageLRU(page)) {
+               if (PageLRU(page) && get_page_unless_zero(page)) {
                        ret = 0;
-                       get_page(page);
                        ClearPageLRU(page);
                        if (PageActive(page))
                                del_page_from_active_list(zone, page);
@@ -62,9 +64,8 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
 }
 
 /*
- * migrate_prep() needs to be called after we have compiled the list of pages
- * to be migrated using isolate_lru_page() but before we begin a series of calls
- * to migrate_pages().
+ * migrate_prep() needs to be called before we start compiling a list of pages
+ * to be migrated using isolate_lru_page().
  */
 int migrate_prep(void)
 {
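
The new ordering mirrors what do_move_pages() does later in this file: drain the per-cpu LRU pagevecs once up front so that pages can actually be found on the LRU and isolated. A minimal caller sketch (page, pm and err are assumed to exist in the caller; new_page_node is the allocator callback added below):

	LIST_HEAD(pagelist);

	migrate_prep();			/* drain per-cpu LRU pagevecs first */
	if (isolate_lru_page(page, &pagelist) == 0)
		err = migrate_pages(&pagelist, new_page_node,
						(unsigned long)pm);
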
@@ -292,10 +293,10 @@ out:
 static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
 {
-       struct page **radix_pointer;
+       void **pslot;
 
        if (!mapping) {
-               /* Anonymous page */
+               /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
                return 0;
@@ -303,12 +304,11 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 
        write_lock_irq(&mapping->tree_lock);
 
-       radix_pointer = (struct page **)radix_tree_lookup_slot(
-                                               &mapping->page_tree,
-                                               page_index(page));
+       pslot = radix_tree_lookup_slot(&mapping->page_tree,
+                                       page_index(page));
 
        if (page_count(page) != 2 + !!PagePrivate(page) ||
-                       *radix_pointer != page) {
+                       (struct page *)radix_tree_deref_slot(pslot) != page) {
                write_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
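
The reference check above encodes an invariant: at this point the only references should be the page cache's (the radix tree slot), the migration caller's (taken by isolate_lru_page()), and possibly one for filesystem-private data. A hypothetical helper, not part of this patch, would spell that out:

/* Hypothetical helper (not in this patch): the references we expect on an
 * otherwise unpinned page cache page at migration time. */
static inline int expected_page_refs(struct page *page)
{
	return 2			/* page cache slot + isolation ref */
		+ !!PagePrivate(page);	/* buffers/fs-private data, if any */
}
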
@@ -316,7 +316,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        /*
         * Now we know that no one else is looking at the page.
         */
-       get_page(newpage);
+       get_page(newpage);      /* add cache reference */
 #ifdef CONFIG_SWAP
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
@@ -324,8 +324,27 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        }
 #endif
 
-       *radix_pointer = newpage;
+       radix_tree_replace_slot(pslot, newpage);
+
+       /*
+        * Drop cache reference from old page.
+        * We know this isn't the last reference.
+        */
        __put_page(page);
+
+       /*
+        * If moved to a different zone then also account
+        * the page for that zone. Other VM counters will be
+        * taken care of when we establish references to the
+        * new page and drop references to the old page.
+        *
+        * Note that anonymous pages are accounted for
+        * via NR_FILE_PAGES and NR_ANON_PAGES if they
+        * are mapped to swap space.
+        */
+       __dec_zone_page_state(page, NR_FILE_PAGES);
+       __inc_zone_page_state(newpage, NR_FILE_PAGES);
+
        write_unlock_irq(&mapping->tree_lock);
 
        return 0;
@@ -407,6 +426,7 @@ int migrate_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(migrate_page);
 
+#ifdef CONFIG_BLOCK
 /*
  * Migration function for pages with buffers. This function can only be used
  * if the underlying filesystem guarantees that no other references to "page"
@@ -464,6 +484,7 @@ int buffer_migrate_page(struct address_space *mapping,
        return 0;
 }
 EXPORT_SYMBOL(buffer_migrate_page);
+#endif
 
 /*
  * Writeback a page to clean the dirty state
@@ -523,7 +544,7 @@ static int fallback_migrate_page(struct address_space *mapping,
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
-       if (page_has_buffers(page) &&
+       if (PagePrivate(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
 
@@ -588,7 +609,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        struct page *page, int force)
 {
        int rc = 0;
-       struct page *newpage = get_new_page(page, private);
+       int *result = NULL;
+       struct page *newpage = get_new_page(page, private, &result);
+       int rcu_locked = 0;
 
        if (!newpage)
                return -ENOMEM;
@@ -609,20 +632,41 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        goto unlock;
                wait_on_page_writeback(page);
        }
-
        /*
-        * Establish migration ptes or remove ptes
+        * try_to_unmap() will take page->mapcount down to 0, and once the
+        * page is unmapped we have no way of noticing if the anon_vma is
+        * freed while we migrate the page. This rcu_read_lock() therefore
+        * delays freeing of the anon_vma until the end of migration. File
+        * cache pages are no problem because they are protected by the page
+        * lock (migration may use writepage() or lock_page() on them), so
+        * only anonymous pages need this care.
         */
-       if (try_to_unmap(page, 1) != SWAP_FAIL) {
-               if (!page_mapped(page))
-                       rc = move_to_new_page(newpage, page);
-       } else
-               /* A vma has VM_LOCKED set -> permanent failure */
-               rc = -EPERM;
+       if (PageAnon(page)) {
+               rcu_read_lock();
+               rcu_locked = 1;
+       }
+       /*
+        * This handles a corner case: when a new swap cache page is read
+        * in, it is linked to the LRU and treated as swapcache, but it has
+        * no rmap yet. Calling try_to_unmap() against a page whose
+        * ->mapping is NULL is a BUG, so handle that case here.
+        */
+       if (!page->mapping)
+               goto rcu_unlock;
+       /* Establish migration ptes or remove ptes */
+       try_to_unmap(page, 1);
+
+       if (!page_mapped(page))
+               rc = move_to_new_page(newpage, page);
 
        if (rc)
                remove_migration_ptes(page, page);
+rcu_unlock:
+       if (rcu_locked)
+               rcu_read_unlock();
+
 unlock:
+
        unlock_page(page);
 
        if (rc != -EAGAIN) {
@@ -642,6 +686,12 @@ move_newpage:
         * then this will free the page.
         */
        move_to_lru(newpage);
+       if (result) {
+               if (rc)
+                       *result = rc;
+               else
+                       *result = page_to_nid(newpage);
+       }
        return rc;
 }
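
Since anon_vma structures are allocated from a SLAB_DESTROY_BY_RCU cache, the protection added above reduces to the following pattern (a condensed sketch of this hunk, not additional code):

	if (PageAnon(page))
		rcu_read_lock();	/* keeps the anon_vma valid after unmap */
	try_to_unmap(page, 1);		/* may take page->mapcount to zero */
	/* ... move_to_new_page(), remove_migration_ptes() ... */
	if (PageAnon(page))
		rcu_read_unlock();	/* the anon_vma may now be freed */
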
 
@@ -710,3 +760,282 @@ out:
        return nr_failed + retry;
 }
 
+#ifdef CONFIG_NUMA
+/*
+ * Move a list of individual pages
+ */
+struct page_to_node {
+       unsigned long addr;
+       struct page *page;
+       int node;
+       int status;
+};
+
+static struct page *new_page_node(struct page *p, unsigned long private,
+               int **result)
+{
+       struct page_to_node *pm = (struct page_to_node *)private;
+
+       while (pm->node != MAX_NUMNODES && pm->page != p)
+               pm++;
+
+       if (pm->node == MAX_NUMNODES)
+               return NULL;
+
+       *result = &pm->status;
+
+       return alloc_pages_node(pm->node,
+                               GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
+}
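
For illustration (addresses and node numbers invented), the pm array that do_move_pages() below passes in as private is terminated by a MAX_NUMNODES sentinel, which is what stops the scan above:

	struct page_to_node pm[] = {
		{ .addr = 0x400000, .node = 1 },  /* move this page to node 1 */
		{ .addr = 0x401000, .node = 0 },  /* move this page to node 0 */
		{ .node = MAX_NUMNODES },	  /* end marker: terminates scan */
	};
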
+
+/*
+ * Move a set of pages as indicated in the pm array. The addr
+ * field must be set to the virtual address of the page to be moved
+ * and the node number must contain a valid target node.
+ */
+static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
+                               int migrate_all)
+{
+       int err;
+       struct page_to_node *pp;
+       LIST_HEAD(pagelist);
+
+       down_read(&mm->mmap_sem);
+
+       /*
+        * Build a list of pages to migrate
+        */
+       migrate_prep();
+       for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
+               struct vm_area_struct *vma;
+               struct page *page;
+
+               /*
+                * A valid page pointer that will not match any of the
+                * pages that will be moved.
+                */
+               pp->page = ZERO_PAGE(0);
+
+               err = -EFAULT;
+               vma = find_vma(mm, pp->addr);
+               if (!vma || !vma_migratable(vma))
+                       goto set_status;
+
+               page = follow_page(vma, pp->addr, FOLL_GET);
+               err = -ENOENT;
+               if (!page)
+                       goto set_status;
+
+               if (PageReserved(page))         /* Check for zero page */
+                       goto put_and_set;
+
+               pp->page = page;
+               err = page_to_nid(page);
+
+               if (err == pp->node)
+                       /*
+                        * Page is already on the target node
+                        */
+                       goto put_and_set;
+
+               err = -EACCES;
+               if (page_mapcount(page) > 1 &&
+                               !migrate_all)
+                       goto put_and_set;
+
+               err = isolate_lru_page(page, &pagelist);
+put_and_set:
+               /*
+                * Either remove the duplicate refcount from
+                * isolate_lru_page() or drop the page ref if it was
+                * not isolated.
+                */
+               put_page(page);
+set_status:
+               pp->status = err;
+       }
+
+       if (!list_empty(&pagelist))
+               err = migrate_pages(&pagelist, new_page_node,
+                               (unsigned long)pm);
+       else
+               err = -ENOENT;
+
+       up_read(&mm->mmap_sem);
+       return err;
+}
+
+/*
+ * Determine the nodes of a list of pages. The addr in the pm array
+ * must have been set to the virtual address of which we want to determine
+ * the node number.
+ */
+static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
+{
+       down_read(&mm->mmap_sem);
+
+       for ( ; pm->node != MAX_NUMNODES; pm++) {
+               struct vm_area_struct *vma;
+               struct page *page;
+               int err;
+
+               err = -EFAULT;
+               vma = find_vma(mm, pm->addr);
+               if (!vma)
+                       goto set_status;
+
+               page = follow_page(vma, pm->addr, 0);
+               err = -ENOENT;
+               /* Use PageReserved to check for zero page */
+               if (!page || PageReserved(page))
+                       goto set_status;
+
+               err = page_to_nid(page);
+set_status:
+               pm->status = err;
+       }
+
+       up_read(&mm->mmap_sem);
+       return 0;
+}
+
+/*
+ * Move a list of pages in the address space of the currently executing
+ * process.
+ */
+asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
+                       const void __user * __user *pages,
+                       const int __user *nodes,
+                       int __user *status, int flags)
+{
+       int err = 0;
+       int i;
+       struct task_struct *task;
+       nodemask_t task_nodes;
+       struct mm_struct *mm;
+       struct page_to_node *pm = NULL;
+
+       /* Check flags */
+       if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
+               return -EINVAL;
+
+       if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
+               return -EPERM;
+
+       /* Find the mm_struct */
+       read_lock(&tasklist_lock);
+       task = pid ? find_task_by_pid(pid) : current;
+       if (!task) {
+               read_unlock(&tasklist_lock);
+               return -ESRCH;
+       }
+       mm = get_task_mm(task);
+       read_unlock(&tasklist_lock);
+
+       if (!mm)
+               return -EINVAL;
+
+       /*
+        * Check if this process has the right to modify the specified
+        * process. The right exists if the process has administrative
+        * capabilities, superuser privileges or the same
+        * userid as the target process.
+        */
+       if ((current->euid != task->suid) && (current->euid != task->uid) &&
+           (current->uid != task->suid) && (current->uid != task->uid) &&
+           !capable(CAP_SYS_NICE)) {
+               err = -EPERM;
+               goto out2;
+       }
+
+       err = security_task_movememory(task);
+       if (err)
+               goto out2;
+
+       task_nodes = cpuset_mems_allowed(task);
+
+       /* Limit nr_pages so that the multiplication cannot overflow */
+       if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
+               err = -E2BIG;
+               goto out2;
+       }
+
+       pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
+       if (!pm) {
+               err = -ENOMEM;
+               goto out2;
+       }
+
+       /*
+        * Get parameters from user space and initialize the pm
+        * array. Return various errors if the user did something wrong.
+        */
+       for (i = 0; i < nr_pages; i++) {
+               const void *p;
+
+               err = -EFAULT;
+               if (get_user(p, pages + i))
+                       goto out;
+
+               pm[i].addr = (unsigned long)p;
+               if (nodes) {
+                       int node;
+
+                       if (get_user(node, nodes + i))
+                               goto out;
+
+                       err = -ENODEV;
+                       if (!node_online(node))
+                               goto out;
+
+                       err = -EACCES;
+                       if (!node_isset(node, task_nodes))
+                               goto out;
+
+                       pm[i].node = node;
+               } else
+                       pm[i].node = 0; /* any value that won't match MAX_NUMNODES */
+       }
+       /* End marker */
+       pm[nr_pages].node = MAX_NUMNODES;
+
+       if (nodes)
+               err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
+       else
+               err = do_pages_stat(mm, pm);
+
+       if (err >= 0)
+               /* Return status information */
+               for (i = 0; i < nr_pages; i++)
+                       if (put_user(pm[i].status, status + i))
+                               err = -EFAULT;
+
+out:
+       vfree(pm);
+out2:
+       mmput(mm);
+       return err;
+}
+#endif
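
From userspace the new syscall is normally reached through the libnuma wrapper (assuming <numaif.h> is available; otherwise syscall(__NR_move_pages, ...) takes the same arguments). A minimal sketch that migrates one of the caller's own pages to node 0:

#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE (libnuma) */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *pages[1];
	int nodes[1] = { 0 };	/* desired node for each page */
	int status[1];

	pages[0] = malloc(4096);
	*(char *)pages[0] = 1;	/* touch it so a page is actually present */

	/* pid 0 means the calling process */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0) {
		perror("move_pages");
		return 1;
	}
	/* status[0] is the node the page ended up on, or a negative errno */
	printf("page is on node %d\n", status[0]);
	return 0;
}
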
+
+/*
+ * Call the migration functions in the vm_ops that may prepare
+ * memory in a vma for migration. Migration functions may perform
+ * the migration for vmas that do not have an underlying page struct.
+ */
+int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
+       const nodemask_t *from, unsigned long flags)
+{
+       struct vm_area_struct *vma;
+       int err = 0;
+
+       for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
+               if (vma->vm_ops && vma->vm_ops->migrate) {
+                       err = vma->vm_ops->migrate(vma, to, from, flags);
+                       if (err)
+                               break;
+               }
+       }
+       return err;
+}
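
A provider of such a vma would wire the hook up roughly as follows (a sketch; the mydrv_* names are invented, and the argument order mirrors the call above):

/* Invented example: a driver whose vmas have no backing page structs. */
static int mydrv_migrate(struct vm_area_struct *vma, const nodemask_t *to,
			const nodemask_t *from, unsigned long flags)
{
	/* relocate the driver-managed memory backing this vma */
	return 0;
}

static struct vm_operations_struct mydrv_vm_ops = {
	.migrate	= mydrv_migrate,
};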