2 * Memory Migration functionality - linux/mm/migration.c
4 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 * Page migration was first developed in the context of the memory hotplug
7 * project. The main authors of the migration code are:
9 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
10 * Hirokazu Takahashi <taka@valinux.co.jp>
11 * Dave Hansen <haveblue@us.ibm.com>
12 * Christoph Lameter <clameter@sgi.com>
15 #include <linux/migrate.h>
16 #include <linux/module.h>
17 #include <linux/swap.h>
18 #include <linux/swapops.h>
19 #include <linux/pagemap.h>
20 #include <linux/buffer_head.h>
21 #include <linux/mm_inline.h>
22 #include <linux/pagevec.h>
23 #include <linux/rmap.h>
24 #include <linux/topology.h>
25 #include <linux/cpu.h>
26 #include <linux/cpuset.h>
27 #include <linux/writeback.h>
31 /* The maximum number of pages to take off the LRU for migration */
32 #define MIGRATE_CHUNK_SIZE 256
34 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
37 * Isolate one page from the LRU lists. If successful, put it onto
38 * the indicated list with elevated page count.
41 * -EBUSY: page not on LRU list
42 * 0: page removed from LRU list and added to the specified list.
44 int isolate_lru_page(struct page *page, struct list_head *pagelist)
49 struct zone *zone = page_zone(page);
51 spin_lock_irq(&zone->lru_lock);
57 del_page_from_active_list(zone, page);
59 del_page_from_inactive_list(zone, page);
60 list_add_tail(&page->lru, pagelist);
62 spin_unlock_irq(&zone->lru_lock);
68 * migrate_prep() needs to be called after we have compiled the list of pages
69 * to be migrated using isolate_lru_page() but before we begin a series of calls
70 * to migrate_pages().
72 int migrate_prep(void)
75 * Clear the LRU lists so pages can be isolated.
76 * Note that pages may be moved off the LRU after we have
77 * drained them. Those pages will fail to migrate like other
78 * pages that may be busy.
85 static inline void move_to_lru(struct page *page)
87 if (PageActive(page)) {
89 * lru_cache_add_active checks that
90 * the PG_active bit is off.
92 ClearPageActive(page);
93 lru_cache_add_active(page);
101 * Add isolated pages on the list back to the LRU.
103 * returns the number of pages put back.
105 int putback_lru_pages(struct list_head *l)
111 list_for_each_entry_safe(page, page2, l, lru) {
112 list_del(&page->lru);
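/*
 * Usage sketch (illustrative only, not part of the original file): the
 * intended life cycle of the helpers above. Pages are first pulled off the
 * LRU with isolate_lru_page(); once the list is complete, migrate_prep() is
 * called (see its comment above), and the list is handed to migrate_pages()
 * together with a list of target pages. migrate_pages() puts pages it could
 * not migrate back on the LRU itself, so the caller does not need a second
 * putback_lru_pages() call. The function and parameter names below are made
 * up for the sketch.
 */
static int example_migrate_list(struct page **pages, int nr,
				struct list_head *new_pages)
{
	LIST_HEAD(pagelist);
	int i;

	for (i = 0; i < nr; i++)
		isolate_lru_page(pages[i], &pagelist);	/* 0 on success */

	migrate_prep();

	/* Returns the number of pages that could not be migrated */
	return migrate_pages(&pagelist, new_pages);
}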
119 static inline int is_swap_pte(pte_t pte)
121 return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
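/*
 * For orientation (rough sketch, not code from this file): the migration
 * entries recognized above are installed by the unmap path, try_to_unmap()
 * in mm/rmap.c, which replaces a present pte with a swap-style entry that
 * records the old page and whether the mapping was writable, roughly:
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, ptep, swp_entry_to_pte(entry));
 *
 * remove_migration_pte() below performs the inverse transformation once
 * migration has finished.
 */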
125 * Restore a potential migration pte to a working pte entry
127 static void remove_migration_pte(struct vm_area_struct *vma,
128 struct page *old, struct page *new)
130 struct mm_struct *mm = vma->vm_mm;
137 unsigned long addr = page_address_in_vma(new, vma);
142 pgd = pgd_offset(mm, addr);
143 if (!pgd_present(*pgd))
146 pud = pud_offset(pgd, addr);
147 if (!pud_present(*pud))
150 pmd = pmd_offset(pud, addr);
151 if (!pmd_present(*pmd))
154 ptep = pte_offset_map(pmd, addr);
156 if (!is_swap_pte(*ptep)) {
161 ptl = pte_lockptr(mm, pmd);
164 if (!is_swap_pte(pte))
167 entry = pte_to_swp_entry(pte);
169 if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
173 pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
174 if (is_write_migration_entry(entry))
175 pte = pte_mkwrite(pte);
176 set_pte_at(mm, addr, ptep, pte);
179 page_add_anon_rmap(new, vma, addr);
181 page_add_file_rmap(new);
183 /* No need to invalidate - it was non-present before */
184 update_mmu_cache(vma, addr, pte);
185 lazy_mmu_prot_update(pte);
188 pte_unmap_unlock(ptep, ptl);
192 * Note that remove_file_migration_ptes will only work on regular mappings;
193 * nonlinear mappings do not use migration entries.
195 static void remove_file_migration_ptes(struct page *old, struct page *new)
197 struct vm_area_struct *vma;
198 struct address_space *mapping = page_mapping(new);
199 struct prio_tree_iter iter;
200 pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
205 spin_lock(&mapping->i_mmap_lock);
207 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
208 remove_migration_pte(vma, old, new);
210 spin_unlock(&mapping->i_mmap_lock);
214 * Must hold mmap_sem lock on at least one of the vmas containing
215 * the page so that the anon_vma cannot vanish.
217 static void remove_anon_migration_ptes(struct page *old, struct page *new)
219 struct anon_vma *anon_vma;
220 struct vm_area_struct *vma;
221 unsigned long mapping;
223 mapping = (unsigned long)new->mapping;
225 if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
229 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
231 anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
232 spin_lock(&anon_vma->lock);
234 list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
235 remove_migration_pte(vma, old, new);
237 spin_unlock(&anon_vma->lock);
241 * Get rid of all migration entries and replace them by
242 * references to the indicated page.
244 static void remove_migration_ptes(struct page *old, struct page *new)
247 remove_anon_migration_ptes(old, new);
249 remove_file_migration_ptes(old, new);
253 * Something used the pte of a page under migration. We need to
254 * get to the page and wait until migration is finished.
255 * When we return from this function the fault will be retried.
257 * This function is called from do_swap_page().
259 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
260 unsigned long address)
267 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
269 if (!is_swap_pte(pte))
272 entry = pte_to_swp_entry(pte);
273 if (!is_migration_entry(entry))
276 page = migration_entry_to_page(entry);
279 pte_unmap_unlock(ptep, ptl);
280 wait_on_page_locked(page);
284 pte_unmap_unlock(ptep, ptl);
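/*
 * Usage sketch (hypothetical helper, not part of the original file): how a
 * fault handler such as do_swap_page() is expected to use
 * migration_entry_wait(). If the non-present pte turns out to hold a
 * migration entry rather than a real swap entry, the handler waits for the
 * migration to finish and lets the fault be retried.
 */
static inline int example_handle_migration_entry(struct mm_struct *mm,
		pmd_t *pmd, unsigned long address, pte_t orig_pte)
{
	swp_entry_t entry = pte_to_swp_entry(orig_pte);

	if (!is_migration_entry(entry))
		return 0;		/* a normal swap entry, handle as swap */

	migration_entry_wait(mm, pmd, address);
	return 1;			/* caller retries the fault */
}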
288 * Replace the page in the mapping.
290 * The number of remaining references must be:
291 * 1 for anonymous pages without a mapping
292 * 2 for pages with a mapping
293 * 3 for pages with a mapping and PagePrivate set.
295 static int migrate_page_move_mapping(struct address_space *mapping,
296 struct page *newpage, struct page *page)
298 struct page **radix_pointer;
302 if (page_count(page) != 1)
307 write_lock_irq(&mapping->tree_lock);
309 radix_pointer = (struct page **)radix_tree_lookup_slot(
313 if (page_count(page) != 2 + !!PagePrivate(page) ||
314 *radix_pointer != page) {
315 write_unlock_irq(&mapping->tree_lock);
320 * Now we know that no one else is looking at the page.
324 if (PageSwapCache(page)) {
325 SetPageSwapCache(newpage);
326 set_page_private(newpage, page_private(page));
330 *radix_pointer = newpage;
332 write_unlock_irq(&mapping->tree_lock);
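/*
 * Worked example of the reference rule above (informal): a page-cache page
 * that has been isolated for migration is pinned by the isolation reference,
 * by its slot in the radix tree and, if PagePrivate is set, by the
 * buffer_heads attached to it -- hence the
 * page_count(page) != 2 + !!PagePrivate(page) test. An anonymous page that
 * is not in the swap cache has no mapping, so only the isolation reference
 * may remain (the page_count(page) != 1 test above). Any additional
 * reference means another user may still be looking at the old page and the
 * move is not performed.
 */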
338 * Copy the page to its new location
340 static void migrate_page_copy(struct page *newpage, struct page *page)
342 copy_highpage(newpage, page);
345 SetPageError(newpage);
346 if (PageReferenced(page))
347 SetPageReferenced(newpage);
348 if (PageUptodate(page))
349 SetPageUptodate(newpage);
350 if (PageActive(page))
351 SetPageActive(newpage);
352 if (PageChecked(page))
353 SetPageChecked(newpage);
354 if (PageMappedToDisk(page))
355 SetPageMappedToDisk(newpage);
357 if (PageDirty(page)) {
358 clear_page_dirty_for_io(page);
359 set_page_dirty(newpage);
363 ClearPageSwapCache(page);
365 ClearPageActive(page);
366 ClearPagePrivate(page);
367 set_page_private(page, 0);
368 page->mapping = NULL;
371 * If any waiters have accumulated on the new page then
372 * wake them up.
374 if (PageWriteback(newpage))
375 end_page_writeback(newpage);
378 /************************************************************
379 * Migration functions
380 ***********************************************************/
382 /* Always fail migration. Used for mappings that are not movable */
383 int fail_migrate_page(struct address_space *mapping,
384 struct page *newpage, struct page *page)
388 EXPORT_SYMBOL(fail_migrate_page);
391 * Common logic to directly migrate a single page suitable for
392 * pages that do not use PagePrivate.
394 * Pages are locked upon entry and exit.
396 int migrate_page(struct address_space *mapping,
397 struct page *newpage, struct page *page)
401 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
403 rc = migrate_page_move_mapping(mapping, newpage, page);
408 migrate_page_copy(newpage, page);
411 EXPORT_SYMBOL(migrate_page);
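/*
 * Usage sketch (hypothetical address_space_operations, real hook): a
 * filesystem whose pages never carry PagePrivate data can point its
 * migratepage method directly at migrate_page().
 */
static struct address_space_operations example_simple_aops = {
	.migratepage	= migrate_page,		/* no buffers to transfer */
};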
414 * Migration function for pages with buffers. This function can only be used
415 * if the underlying filesystem guarantees that no other references to "page"
416 * exist.
418 int buffer_migrate_page(struct address_space *mapping,
419 struct page *newpage, struct page *page)
421 struct buffer_head *bh, *head;
424 if (!page_has_buffers(page))
425 return migrate_page(mapping, newpage, page);
427 head = page_buffers(page);
429 rc = migrate_page_move_mapping(mapping, newpage, page);
438 bh = bh->b_this_page;
440 } while (bh != head);
442 ClearPagePrivate(page);
443 set_page_private(newpage, page_private(page));
444 set_page_private(page, 0);
450 set_bh_page(bh, newpage, bh_offset(bh));
451 bh = bh->b_this_page;
453 } while (bh != head);
455 SetPagePrivate(newpage);
457 migrate_page_copy(newpage, page);
463 bh = bh->b_this_page;
465 } while (bh != head);
469 EXPORT_SYMBOL(buffer_migrate_page);
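/*
 * Usage sketch (hypothetical address_space_operations, real hook): a block
 * device based filesystem that keeps buffer_heads on its pages would select
 * buffer_migrate_page() instead, so the buffers move along with the page.
 */
static struct address_space_operations example_blkdev_aops = {
	.migratepage	= buffer_migrate_page,
};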
472 * Write back a page to clean the dirty state
474 static int writeout(struct address_space *mapping, struct page *page)
476 struct writeback_control wbc = {
477 .sync_mode = WB_SYNC_NONE,
480 .range_end = LLONG_MAX,
486 if (!mapping->a_ops->writepage)
487 /* No write method for the address space */
490 if (!clear_page_dirty_for_io(page))
491 /* Someone else already triggered a write */
495 * A dirty page may imply that the underlying filesystem has
496 * the page on some queue. So the page must be clean for
497 * migration. Writeout may mean we lose the lock and the
498 * page state is no longer what we checked for earlier.
499 * At this point we know that the migration attempt cannot
500 * be successful.
502 remove_migration_ptes(page, page);
504 rc = mapping->a_ops->writepage(page, &wbc);
506 /* I/O Error writing */
509 if (rc != AOP_WRITEPAGE_ACTIVATE)
510 /* unlocked. Relock */
517 * Default handling if a filesystem does not provide a migration function.
519 static int fallback_migrate_page(struct address_space *mapping,
520 struct page *newpage, struct page *page)
523 return writeout(mapping, page);
526 * Buffers may be managed in a filesystem-specific way.
527 * We must have no buffers or drop them.
529 if (page_has_buffers(page) &&
530 !try_to_release_page(page, GFP_KERNEL))
533 return migrate_page(mapping, newpage, page);
537 * Move a page to a newly allocated page
538 * The page is locked and all ptes have been successfully removed.
540 * The new page will have replaced the old page if this function
541 * is successful.
543 static int move_to_new_page(struct page *newpage, struct page *page)
545 struct address_space *mapping;
549 * Block others from accessing the page when we get around to
550 * establishing additional references. We are the only one
551 * holding a reference to the new page at this point.
553 if (TestSetPageLocked(newpage))
556 /* Prepare mapping for the new page.*/
557 newpage->index = page->index;
558 newpage->mapping = page->mapping;
560 mapping = page_mapping(page);
562 rc = migrate_page(mapping, newpage, page);
563 else if (mapping->a_ops->migratepage)
565 * Most pages have a mapping and most filesystems
566 * should provide a migration function. Anonymous
567 * pages are part of swap space which also has its
568 * own migration function. This is the most common
569 * path for page migration.
571 rc = mapping->a_ops->migratepage(mapping,
574 rc = fallback_migrate_page(mapping, newpage, page);
577 remove_migration_ptes(page, newpage);
579 newpage->mapping = NULL;
581 unlock_page(newpage);
587 * Obtain the lock on page, remove all ptes and migrate the page
588 * to the newly allocated page in newpage.
590 static int unmap_and_move(struct page *newpage, struct page *page, int force)
594 if (page_count(page) == 1)
595 /* page was freed from under us. So we are done. */
599 if (TestSetPageLocked(page)) {
605 if (PageWriteback(page)) {
608 wait_on_page_writeback(page);
612 * Establish migration ptes or remove ptes
614 if (try_to_unmap(page, 1) != SWAP_FAIL) {
615 if (!page_mapped(page))
616 rc = move_to_new_page(newpage, page);
618 /* A vma has VM_LOCKED set -> permanent failure */
622 remove_migration_ptes(page, page);
628 * A page that has been migrated has all references
629 * removed and will be freed. A page that has not been
630 * migrated will have kept its references and be
631 * restored.
633 list_del(&page->lru);
636 list_del(&newpage->lru);
637 move_to_lru(newpage);
645 * Two lists are passed to this function. The first list
646 * contains the pages isolated from the LRU to be migrated.
647 * The second list contains new pages that the isolated pages
648 * should be migrated to.
650 * The function returns after 10 attempts or if no pages
651 * are movable anymore because the 'to' list has become empty
652 * or no retryable pages exist anymore. All pages will be
653 * returned to the LRU or freed.
655 * Return: Number of pages not migrated.
657 int migrate_pages(struct list_head *from, struct list_head *to)
664 int swapwrite = current->flags & PF_SWAPWRITE;
668 current->flags |= PF_SWAPWRITE;
670 for(pass = 0; pass < 10 && retry; pass++) {
673 list_for_each_entry_safe(page, page2, from, lru) {
680 rc = unmap_and_move(lru_to_page(to), page, pass > 2);
689 /* Permanent failure */
697 current->flags &= ~PF_SWAPWRITE;
699 putback_lru_pages(from);
700 return nr_failed + retry;
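/*
 * Worked example of the return value (informal): if, after the final pass,
 * three pages had failed permanently (nr_failed == 3) and one page was
 * still classified as retryable (retry == 1), migrate_pages() reports 4
 * pages as not migrated. Every page that was not migrated ends up back on
 * the LRU, either in unmap_and_move() or via the putback_lru_pages() call
 * above.
 */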
704 * Migrate the list 'pagelist' of pages to a certain destination.
706 * Specify the destination with either a non-NULL vma or dest >= 0 (a node number).
707 * Return the number of pages not migrated, or an error code.
709 int migrate_pages_to(struct list_head *pagelist,
710 struct vm_area_struct *vma, int dest)
714 unsigned long offset = 0;
722 list_for_each(p, pagelist) {
725 * The address passed to alloc_page_vma is used to
726 * generate the proper interleave behavior. We fake
727 * the address here by an increasing offset in order
728 * to get the proper distribution of pages.
730 * No decision has been made as to which page
731 * a certain old page is moved to so we cannot
732 * specify the correct address.
734 page = alloc_page_vma(GFP_HIGHUSER, vma,
735 offset + vma->vm_start);
739 page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
745 list_add_tail(&page->lru, &newlist);
747 if (nr_pages > MIGRATE_CHUNK_SIZE)
750 err = migrate_pages(pagelist, &newlist);
754 if (list_empty(&newlist) && !list_empty(pagelist))
759 /* Calculate number of leftover pages */
760 list_for_each(p, pagelist)