diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f127940ec2..efd78527ad 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -23,12 +23,15 @@
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
+static unsigned long surplus_huge_pages;
 unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
+static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
+int hugetlb_dynamic_pool;
 
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
@@ -42,7 +45,7 @@ static void clear_huge_page(struct page *page, unsigned long addr)
 	might_sleep();
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
 		cond_resched();
-		clear_user_highpage(page + i, addr);
+		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
 	}
 }
 
@@ -71,8 +74,9 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 {
 	int nid;
 	struct page *page = NULL;
+	struct mempolicy *mpol;
 	struct zonelist *zonelist = huge_zonelist(vma, address,
-					htlb_alloc_mask);
+					htlb_alloc_mask, &mpol);
 	struct zone **z;
 
 	for (z = zonelist->zones; *z; z++) {
@@ -84,22 +88,83 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 			list_del(&page->lru);
 			free_huge_pages--;
 			free_huge_pages_node[nid]--;
+			if (vma && vma->vm_flags & VM_MAYSHARE)
+				resv_huge_pages--;
+			break;
 		}
 	}
+	mpol_free(mpol);	/* unref if mpol !NULL */
 	return page;
 }
 
+static void update_and_free_page(struct page *page)
+{
+	int i;
+	nr_huge_pages--;
+	nr_huge_pages_node[page_to_nid(page)]--;
+	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
+		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
+				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
+				1 << PG_private | 1<< PG_writeback);
+	}
+	set_compound_page_dtor(page, NULL);
+	set_page_refcounted(page);
+	__free_pages(page, HUGETLB_PAGE_ORDER);
+}
+
 static void free_huge_page(struct page *page)
 {
-	BUG_ON(page_count(page));
+	int nid = page_to_nid(page);
 
+	BUG_ON(page_count(page));
 	INIT_LIST_HEAD(&page->lru);
 
 	spin_lock(&hugetlb_lock);
-	enqueue_huge_page(page);
+	if (surplus_huge_pages_node[nid]) {
+		update_and_free_page(page);
+		surplus_huge_pages--;
+		surplus_huge_pages_node[nid]--;
+	} else {
+		enqueue_huge_page(page);
+	}
 	spin_unlock(&hugetlb_lock);
 }
 
+/*
+ * Increment or decrement surplus_huge_pages.  Keep node-specific counters
+ * balanced by operating on them in a round-robin fashion.
+ * Returns 1 if an adjustment was made.
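+ *
+ * (Editor's note, not part of the original patch: with two online nodes
+ * that each hold huge pages, and prev_nid == 0, successive
+ * adjust_pool_surplus(1) calls mark a surplus page on node 1, then on
+ * node 0.  Both call sites in set_max_huge_pages() below hold
+ * hugetlb_lock across the call.)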
+ */
+static int adjust_pool_surplus(int delta)
+{
+	static int prev_nid;
+	int nid = prev_nid;
+	int ret = 0;
+
+	VM_BUG_ON(delta != -1 && delta != 1);
+	do {
+		nid = next_node(nid, node_online_map);
+		if (nid == MAX_NUMNODES)
+			nid = first_node(node_online_map);
+
+		/* To shrink on this node, there must be a surplus page */
+		if (delta < 0 && !surplus_huge_pages_node[nid])
+			continue;
+		/* Surplus cannot exceed the total number of pages */
+		if (delta > 0 && surplus_huge_pages_node[nid] >=
+						nr_huge_pages_node[nid])
+			continue;
+
+		surplus_huge_pages += delta;
+		surplus_huge_pages_node[nid] += delta;
+		ret = 1;
+		break;
+	} while (nid != prev_nid);
+
+	prev_nid = nid;
+	return ret;
+}
+
 static int alloc_fresh_huge_page(void)
 {
 	static int prev_nid;
@@ -132,15 +197,140 @@ static int alloc_fresh_huge_page(void)
 	return 0;
 }
 
+static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
+						unsigned long address)
+{
+	struct page *page;
+
+	/* Check if the dynamic pool is enabled */
+	if (!hugetlb_dynamic_pool)
+		return NULL;
+
+	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
+					HUGETLB_PAGE_ORDER);
+	if (page) {
+		set_compound_page_dtor(page, free_huge_page);
+		spin_lock(&hugetlb_lock);
+		nr_huge_pages++;
+		nr_huge_pages_node[page_to_nid(page)]++;
+		surplus_huge_pages++;
+		surplus_huge_pages_node[page_to_nid(page)]++;
+		spin_unlock(&hugetlb_lock);
+	}
+
+	return page;
+}
+
+/*
+ * Increase the hugetlb pool such that it can accommodate a reservation
+ * of size 'delta'.
+ */
+static int gather_surplus_pages(int delta)
+{
+	struct list_head surplus_list;
+	struct page *page, *tmp;
+	int ret, i;
+	int needed, allocated;
+
+	needed = (resv_huge_pages + delta) - free_huge_pages;
+	if (needed <= 0)
+		return 0;
+
+	allocated = 0;
+	INIT_LIST_HEAD(&surplus_list);
+
+	ret = -ENOMEM;
+retry:
+	spin_unlock(&hugetlb_lock);
+	for (i = 0; i < needed; i++) {
+		page = alloc_buddy_huge_page(NULL, 0);
+		if (!page) {
+			/*
+			 * We were not able to allocate enough pages to
+			 * satisfy the entire reservation so we free what
+			 * we've allocated so far.
+			 */
+			spin_lock(&hugetlb_lock);
+			needed = 0;
+			goto free;
+		}
+
+		list_add(&page->lru, &surplus_list);
+	}
+	allocated += needed;
+
+	/*
+	 * After retaking hugetlb_lock, we need to recalculate 'needed'
+	 * because either resv_huge_pages or free_huge_pages may have changed.
+	 */
+	spin_lock(&hugetlb_lock);
+	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
+	if (needed > 0)
+		goto retry;
+
+	/*
+	 * The surplus_list now contains _at_least_ the number of extra pages
+	 * needed to accommodate the reservation.  Add the appropriate number
+	 * of pages to the hugetlb pool and free the extras back to the buddy
+	 * allocator.
+	 */
+	needed += allocated;
+	ret = 0;
+free:
+	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+		list_del(&page->lru);
+		if ((--needed) >= 0)
+			enqueue_huge_page(page);
+		else
+			update_and_free_page(page);
+	}
+
+	return ret;
+}
+
+/*
+ * When releasing a hugetlb pool reservation, any surplus pages that were
+ * allocated to satisfy the reservation must be explicitly freed if they were
+ * never used.
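+ *
+ * (Editor's note, not part of the original patch: this runs under
+ * hugetlb_lock, called from hugetlb_acct_memory() below when a
+ * reservation shrinks.  For example, if a mapping reserved 4 huge pages
+ * but only 1 was ever faulted in, up to 3 surplus pages can be handed
+ * back to the buddy allocator here.)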
+ */ +void return_unused_surplus_pages(unsigned long unused_resv_pages) +{ + static int nid = -1; + struct page *page; + unsigned long nr_pages; + + nr_pages = min(unused_resv_pages, surplus_huge_pages); + + while (nr_pages) { + nid = next_node(nid, node_online_map); + if (nid == MAX_NUMNODES) + nid = first_node(node_online_map); + + if (!surplus_huge_pages_node[nid]) + continue; + + if (!list_empty(&hugepage_freelists[nid])) { + page = list_entry(hugepage_freelists[nid].next, + struct page, lru); + list_del(&page->lru); + update_and_free_page(page); + free_huge_pages--; + free_huge_pages_node[nid]--; + surplus_huge_pages--; + surplus_huge_pages_node[nid]--; + nr_pages--; + } + } +} + static struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr) { - struct page *page; + struct page *page = NULL; + int use_reserved_page = vma->vm_flags & VM_MAYSHARE; spin_lock(&hugetlb_lock); - if (vma->vm_flags & VM_MAYSHARE) - resv_huge_pages--; - else if (free_huge_pages <= resv_huge_pages) + if (!use_reserved_page && (free_huge_pages <= resv_huge_pages)) goto fail; page = dequeue_huge_page(vma, addr); @@ -152,10 +342,17 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, return page; fail: - if (vma->vm_flags & VM_MAYSHARE) - resv_huge_pages++; spin_unlock(&hugetlb_lock); - return NULL; + + /* + * Private mappings do not use reserved huge pages so the allocation + * may have failed due to an undersized hugetlb pool. Try to grab a + * surplus huge page from the buddy allocator. + */ + if (!use_reserved_page) + page = alloc_buddy_huge_page(vma, addr); + + return page; } static int __init hugetlb_init(void) @@ -198,21 +395,6 @@ static unsigned int cpuset_mems_nr(unsigned int *array) } #ifdef CONFIG_SYSCTL -static void update_and_free_page(struct page *page) -{ - int i; - nr_huge_pages--; - nr_huge_pages_node[page_to_nid(page)]--; - for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) { - page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | - 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | - 1 << PG_private | 1<< PG_writeback); - } - set_compound_page_dtor(page, NULL); - set_page_refcounted(page); - __free_pages(page, HUGETLB_PAGE_ORDER); -} - #ifdef CONFIG_HIGHMEM static void try_to_free_low(unsigned long count) { @@ -238,26 +420,62 @@ static inline void try_to_free_low(unsigned long count) } #endif +#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages) static unsigned long set_max_huge_pages(unsigned long count) { - while (count > nr_huge_pages) { - if (!alloc_fresh_huge_page()) - return nr_huge_pages; - } - if (count >= nr_huge_pages) - return nr_huge_pages; + unsigned long min_count, ret; + /* + * Increase the pool size + * First take pages out of surplus state. Then make up the + * remaining difference by allocating fresh huge pages. + */ spin_lock(&hugetlb_lock); - count = max(count, resv_huge_pages); - try_to_free_low(count); - while (count < nr_huge_pages) { + while (surplus_huge_pages && count > persistent_huge_pages) { + if (!adjust_pool_surplus(-1)) + break; + } + + while (count > persistent_huge_pages) { + int ret; + /* + * If this allocation races such that we no longer need the + * page, free_huge_page will handle it by freeing the page + * and reducing the surplus. 
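+		 * (Editor's note, not part of the original patch: the
+		 * lock must be dropped here because alloc_fresh_huge_page()
+		 * can sleep in the page allocator, and it takes
+		 * hugetlb_lock itself to update the counters.)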
+ */ + spin_unlock(&hugetlb_lock); + ret = alloc_fresh_huge_page(); + spin_lock(&hugetlb_lock); + if (!ret) + goto out; + + } + if (count >= persistent_huge_pages) + goto out; + + /* + * Decrease the pool size + * First return free pages to the buddy allocator (being careful + * to keep enough around to satisfy reservations). Then place + * pages into surplus state as needed so the pool will shrink + * to the desired size as pages become free. + */ + min_count = max(count, resv_huge_pages); + try_to_free_low(min_count); + while (min_count < persistent_huge_pages) { struct page *page = dequeue_huge_page(NULL, 0); if (!page) break; update_and_free_page(page); } + while (count < persistent_huge_pages) { + if (!adjust_pool_surplus(1)) + break; + } +out: + ret = persistent_huge_pages; spin_unlock(&hugetlb_lock); - return nr_huge_pages; + return ret; } int hugetlb_sysctl_handler(struct ctl_table *table, int write, @@ -289,10 +507,12 @@ int hugetlb_report_meminfo(char *buf) "HugePages_Total: %5lu\n" "HugePages_Free: %5lu\n" "HugePages_Rsvd: %5lu\n" + "HugePages_Surp: %5lu\n" "Hugepagesize: %5lu kB\n", nr_huge_pages, free_huge_pages, resv_huge_pages, + surplus_huge_pages, HPAGE_SIZE/1024); } @@ -352,7 +572,6 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma, entry = pte_mkwrite(pte_mkdirty(*ptep)); if (ptep_set_access_flags(vma, address, ptep, entry, 1)) { update_mmu_cache(vma, address, entry); - lazy_mmu_prot_update(entry); } } @@ -642,7 +861,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, spin_unlock(&mm->page_table_lock); ret = hugetlb_fault(mm, vma, vaddr, 0); spin_lock(&mm->page_table_lock); - if (!(ret & VM_FAULT_MAJOR)) + if (!(ret & VM_FAULT_ERROR)) continue; remainder = 0; @@ -705,7 +924,6 @@ void hugetlb_change_protection(struct vm_area_struct *vma, pte = huge_ptep_get_and_clear(mm, address, ptep); pte = pte_mkhuge(pte_modify(pte, newprot)); set_huge_pte_at(mm, address, ptep, pte); - lazy_mmu_prot_update(pte); } } spin_unlock(&mm->page_table_lock); @@ -840,21 +1058,6 @@ static int hugetlb_acct_memory(long delta) int ret = -ENOMEM; spin_lock(&hugetlb_lock); - if ((delta + resv_huge_pages) <= free_huge_pages) { - resv_huge_pages += delta; - ret = 0; - } - spin_unlock(&hugetlb_lock); - return ret; -} - -int hugetlb_reserve_pages(struct inode *inode, long from, long to) -{ - long ret, chg; - - chg = region_chg(&inode->i_mapping->private_list, from, to); - if (chg < 0) - return chg; /* * When cpuset is configured, it breaks the strict hugetlb page * reservation as the accounting is done on a global variable. Such @@ -872,8 +1075,31 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to) * a best attempt and hopefully to minimize the impact of changing * semantics that cpuset has. */ - if (chg > cpuset_mems_nr(free_huge_pages_node)) - return -ENOMEM; + if (delta > 0) { + if (gather_surplus_pages(delta) < 0) + goto out; + + if (delta > cpuset_mems_nr(free_huge_pages_node)) + goto out; + } + + ret = 0; + resv_huge_pages += delta; + if (delta < 0) + return_unused_surplus_pages((unsigned long) -delta); + +out: + spin_unlock(&hugetlb_lock); + return ret; +} + +int hugetlb_reserve_pages(struct inode *inode, long from, long to) +{ + long ret, chg; + + chg = region_chg(&inode->i_mapping->private_list, from, to); + if (chg < 0) + return chg; ret = hugetlb_acct_memory(chg); if (ret < 0)
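
Editor's note (not part of the patch): the diff above is truncated mid-hunk in the source and is left as found. As a hedged illustration of what the new dynamic-pool machinery enables, here is a minimal userspace sketch. It is written under stated assumptions, not taken from this commit: the hugetlbfs mount point /mnt/huge and the file name demo are made up, the huge page size is assumed to be 2 MB, and the knob is assumed to be exposed as /proc/sys/vm/hugetlb_dynamic_pool (only the hugetlb_dynamic_pool variable itself appears in this diff). With the knob enabled, the MAP_PRIVATE fault below can be satisfied by alloc_buddy_huge_page() even when the static pool is exhausted, and the page is counted in the new HugePages_Surp field of /proc/meminfo until it is released.

/*
 * Illustrative sketch only -- not part of the patch.  Touch one
 * MAP_PRIVATE huge page; with the dynamic pool enabled the kernel may
 * grow the hugetlb pool on demand:
 *
 *	echo 1 > /proc/sys/vm/hugetlb_dynamic_pool
 *
 * Assumes hugetlbfs mounted at /mnt/huge and 2 MB huge pages.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed huge page size */

int main(void)
{
	int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE,
		 fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	memset(p, 0, HPAGE_SIZE);	/* fault: may allocate a surplus page */
	munmap(p, HPAGE_SIZE);		/* frees it back via free_huge_page() */
	close(fd);
	unlink("/mnt/huge/demo");
	return 0;
}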