/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */
#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
/* The maximum number of pages to take off the LRU for migration */
#define MIGRATE_CHUNK_SIZE 256

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
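/*
 * lru_to_page() returns the page whose ->lru entry sits at the tail of the
 * given list (the list head's ->prev). migrate_pages() below uses it to pull
 * the next destination page off the "to" list.
 */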
/*
 * Isolate one page from the LRU lists. If successful, put it onto
 * the indicated list with an elevated page count.
 *
 * -EBUSY: page not on LRU list
 * 0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
		del_page_from_active_list(zone, page);
		del_page_from_inactive_list(zone, page);
	list_add_tail(&page->lru, pagelist);
	spin_unlock_irq(&zone->lru_lock);
/*
 * migrate_prep() needs to be called after we have compiled the list of pages
 * to be migrated using isolate_lru_page() but before we begin a series of
 * calls to migrate_pages().
 */
int migrate_prep(void)
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
static inline void move_to_lru(struct page *page)
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
	list_for_each_entry_safe(page, page2, l, lru) {
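/*
 * Illustrative sketch (not part of the original file): the typical calling
 * pattern for the isolation API above. The helper name and the page argument
 * are hypothetical; the caller is assumed to have found a candidate page by
 * some other means. Isolated pages are either handed to migrate_pages() or
 * returned to the LRU with putback_lru_pages().
 */
#if 0
static void isolation_example(struct page *page)
{
	LIST_HEAD(pagelist);

	if (isolate_lru_page(page, &pagelist) == 0) {
		/* page is now off the LRU and on our private list */
		/* ... decide not to migrate after all ... */
		putback_lru_pages(&pagelist);	/* undo: back onto the LRU */
	}
}
#endif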
static inline int is_swap_pte(pte_t pte)
	return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
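/*
 * A migration entry is encoded as a special type of swap entry, which is why
 * it satisfies is_swap_pte() above: the pte is neither empty, nor present,
 * nor a file pte. Such entries are installed by try_to_unmap() while a page
 * is under migration (see migrate_pages() below) and are recognised with
 * is_migration_entry() and decoded with migration_entry_to_page().
 */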
/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma, unsigned long addr,
		struct page *old, struct page *new)
	struct mm_struct *mm = vma->vm_mm;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {

	ptl = pte_lockptr(mm, pmd);

	if (!is_swap_pte(pte))

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)

	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	set_pte_at(mm, addr, ptep, pte);
	page_add_anon_rmap(new, vma, addr);

	pte_unmap_unlock(ptep, ptl);
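/*
 * Two details of the pte constructed above are worth noting: it is made old
 * with pte_mkold(), so the freshly migrated page does not start out looking
 * recently referenced, and write permission is restored only when the
 * original pte was writable, which is what is_write_migration_entry()
 * reports.
 */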
/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 *
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	mapping = (unsigned long)new->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)

	/*
	 * We hold the mmap_sem lock, so there is no need to call
	 * page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_migration_pte(vma, page_address_in_vma(new, vma),
					old, new);

	spin_unlock(&anon_vma->lock);
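/*
 * migrate_pages() below uses this helper in both directions: after a
 * successful move it calls remove_migration_ptes(page, newpage) so that all
 * migration entries point at the new page, and on failure it calls
 * remove_migration_ptes(page, page) to restore the original ptes.
 */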
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
		unsigned long address)
	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	if (!is_swap_pte(pte))

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))

	page = migration_entry_to_page(entry);

	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);

	pte_unmap_unlock(ptep, ptl);
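/*
 * Caller-side sketch (condensed from do_swap_page() in mm/memory.c, for
 * illustration only): when the "swap" entry found during a fault is really a
 * migration entry, the fault handler waits here and then returns, so the
 * fault is retried once migration has finished.
 */
#if 0
	entry = pte_to_swp_entry(orig_pte);
	if (is_migration_entry(entry)) {
		migration_entry_wait(mm, pmd, address);
		goto out;
	}
#endif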
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
	struct page **radix_pointer;

	if (page_count(page) != 1)

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(

	if (page_count(page) != 2 + !!PagePrivate(page) ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);

	/*
	 * Now we know that no one else is looking at the page.
	 */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));

	*radix_pointer = newpage;

	write_unlock_irq(&mapping->tree_lock);
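/*
 * The reference counts checked above correspond to the rule in the comment:
 * the reference taken when the page was isolated accounts for 1, the
 * mapping's radix tree slot for a 2nd, and buffer heads (PagePrivate) for a
 * 3rd, which is what the "2 + !!PagePrivate(page)" test expresses for pages
 * that do have a mapping.
 */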
/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
	copy_highpage(newpage, page);

		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
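/*
 * Note that migrate_page_copy() transfers the dirty state as well:
 * clear_page_dirty_for_io() takes the dirty bit off the old page and
 * set_page_dirty() marks the new one, so any pending writeout is done
 * against the new location of the data.
 */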
/************************************************************
 *                      Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)

EXPORT_SYMBOL(fail_migrate_page);
/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	migrate_page_copy(newpage, page);

EXPORT_SYMBOL(migrate_page);
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
	struct buffer_head *bh, *head;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

		bh = bh->b_this_page;
	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);

		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

		bh = bh->b_this_page;
	} while (bh != head);

EXPORT_SYMBOL(buffer_migrate_page);
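/*
 * Illustrative sketch (not taken from any particular filesystem): a
 * block-based filesystem that keeps buffer heads on its pages would
 * typically hook this helper up through its address_space_operations, e.g.
 *
 *	static struct address_space_operations foo_aops = {
 *		...
 *		.migratepage	= buffer_migrate_page,
 *	};
 *
 * Mappings that provide no ->migratepage method fall through to
 * fallback_migrate_page() below, and mappings that must never be migrated
 * can point the method at fail_migrate_page() above.
 */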
static int fallback_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
	/*
	 * Default handling if a filesystem does not provide
	 * a migration function. We can only migrate clean
	 * pages so try to write out any dirty pages first.
	 */
	if (PageDirty(page)) {
		switch (pageout(page, mapping)) {

			/* Relock since we lost the lock */

			/* Must retry since page state may have changed */

			; /* try to migrate the page below */

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_buffers(page) &&
			!try_to_release_page(page, GFP_KERNEL))

	return migrate_page(mapping, newpage, page);
/*
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * from the LRU are moved to.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because "to" has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		struct list_head *moved, struct list_head *failed)
	int swapwrite = current->flags & PF_SWAPWRITE;

	current->flags |= PF_SWAPWRITE;
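	/*
	 * PF_SWAPWRITE is set so that writeout done on behalf of migration,
	 * e.g. pageout() in fallback_migrate_page(), is treated like
	 * writeback issued by the memory reclaim daemons rather than by an
	 * ordinary task; the flag's original value is remembered in
	 * "swapwrite" above so it can be restored at the end of the function.
	 */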
	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */

		if (to && list_empty(to))

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later
		 * we use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		if (TestSetPageLocked(page))

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
			wait_on_page_writeback(page);

		if (PageWriteback(page))

		/*
		 * Establish migration ptes or remove ptes
		 */
		if (try_to_unmap(page, 1) == SWAP_FAIL)
			/* A vma has VM_LOCKED set -> permanent failure */

		if (page_mapped(page))

		newpage = lru_to_page(to);

		/* Prepare mapping for the new page. */
		newpage->index = page->index;
		newpage->mapping = page->mapping;

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);

			rc = migrate_page(mapping, newpage, page);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping,
							newpage, page);

			rc = fallback_migrate_page(mapping, newpage, page);

		remove_migration_ptes(page, newpage);

		unlock_page(newpage);

		remove_migration_ptes(page, page);

		newpage->mapping = NULL;

		/* Permanent failure */
		list_move(&page->lru, failed);

		/* Successful migration. Return page to LRU */
		move_to_lru(newpage);

		list_move(&page->lru, moved);

	if (retry && pass++ < 10)

	current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
/*
 * Migrate the list 'pagelist' of pages to a certain destination.
 *
 * Specify destination with either non-NULL vma or dest >= 0
 * Return the number of pages not migrated or error code
 */
int migrate_pages_to(struct list_head *pagelist,
		struct vm_area_struct *vma, int dest)
	unsigned long offset = 0;

	list_for_each(p, pagelist) {

			/*
			 * The address passed to alloc_page_vma is used to
			 * generate the proper interleave behavior. We fake
			 * the address here by an increasing offset in order
			 * to get the proper distribution of pages.
			 *
			 * No decision has been made as to which page
			 * a certain old page is moved to, so we cannot
			 * specify the correct address.
			 */
			page = alloc_page_vma(GFP_HIGHUSER, vma,
					offset + vma->vm_start);

			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);

		list_add_tail(&page->lru, &newlist);

		if (nr_pages > MIGRATE_CHUNK_SIZE)

	err = migrate_pages(pagelist, &newlist, &moved, &failed);

	putback_lru_pages(&moved);	/* Call release pages instead ?? */

	if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))

	/* Return leftover allocated pages */
	while (!list_empty(&newlist)) {
		page = list_entry(newlist.next, struct page, lru);
		list_del(&page->lru);

	list_splice(&failed, pagelist);

	/* Calculate number of leftover pages */
	list_for_each(p, pagelist)
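/*
 * The chunking above (MIGRATE_CHUNK_SIZE, defined near the top of this file)
 * limits how many destination pages are allocated onto "newlist" for each
 * migrate_pages() call, so a long pagelist is processed in chunks of about
 * MIGRATE_CHUNK_SIZE pages at a time.
 */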