X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fhugetlb.c;h=8b809ecefa39e4b54f4bfbed8830009a9e260e96;hb=e697b8d13ede3893724898d983eff3f8c9183643;hp=de4cf458d6e1979432aad46613d0816146347fc1;hpb=5a16eff86dc1194a17c69250492e820d828e3bde;p=linux-2.6 diff --git a/mm/hugetlb.c b/mm/hugetlb.c index de4cf458d6..8b809ecefa 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -23,12 +23,16 @@ const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages; +static unsigned long surplus_huge_pages; unsigned long max_huge_pages; static struct list_head hugepage_freelists[MAX_NUMNODES]; static unsigned int nr_huge_pages_node[MAX_NUMNODES]; static unsigned int free_huge_pages_node[MAX_NUMNODES]; +static unsigned int surplus_huge_pages_node[MAX_NUMNODES]; static gfp_t htlb_alloc_mask = GFP_HIGHUSER; unsigned long hugepages_treat_as_movable; +int hugetlb_dynamic_pool; +static int hugetlb_next_nid; /* * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages @@ -42,7 +46,7 @@ static void clear_huge_page(struct page *page, unsigned long addr) might_sleep(); for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) { cond_resched(); - clear_user_highpage(page + i, addr); + clear_user_highpage(page + i, addr + i * PAGE_SIZE); } } @@ -71,8 +75,9 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma, { int nid; struct page *page = NULL; + struct mempolicy *mpol; struct zonelist *zonelist = huge_zonelist(vma, address, - htlb_alloc_mask); + htlb_alloc_mask, &mpol); struct zone **z; for (z = zonelist->zones; *z; z++) { @@ -84,64 +89,278 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma, list_del(&page->lru); free_huge_pages--; free_huge_pages_node[nid]--; + if (vma && vma->vm_flags & VM_MAYSHARE) + resv_huge_pages--; break; } } + mpol_free(mpol); /* unref if mpol !NULL */ return page; } +static void update_and_free_page(struct page *page) +{ + int i; + nr_huge_pages--; + nr_huge_pages_node[page_to_nid(page)]--; + for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) { + page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | + 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | + 1 << PG_private | 1<< PG_writeback); + } + set_compound_page_dtor(page, NULL); + set_page_refcounted(page); + __free_pages(page, HUGETLB_PAGE_ORDER); +} + static void free_huge_page(struct page *page) { - BUG_ON(page_count(page)); + int nid = page_to_nid(page); + BUG_ON(page_count(page)); INIT_LIST_HEAD(&page->lru); spin_lock(&hugetlb_lock); - enqueue_huge_page(page); + if (surplus_huge_pages_node[nid]) { + update_and_free_page(page); + surplus_huge_pages--; + surplus_huge_pages_node[nid]--; + } else { + enqueue_huge_page(page); + } spin_unlock(&hugetlb_lock); } -static int alloc_fresh_huge_page(void) +/* + * Increment or decrement surplus_huge_pages. Keep node-specific counters + * balanced by operating on them in a round-robin fashion. + * Returns 1 if an adjustment was made. 
+ */ +static int adjust_pool_surplus(int delta) { static int prev_nid; - struct page *page; - int nid; + int nid = prev_nid; + int ret = 0; + + VM_BUG_ON(delta != -1 && delta != 1); + do { + nid = next_node(nid, node_online_map); + if (nid == MAX_NUMNODES) + nid = first_node(node_online_map); + + /* To shrink on this node, there must be a surplus page */ + if (delta < 0 && !surplus_huge_pages_node[nid]) + continue; + /* Surplus cannot exceed the total number of pages */ + if (delta > 0 && surplus_huge_pages_node[nid] >= + nr_huge_pages_node[nid]) + continue; + + surplus_huge_pages += delta; + surplus_huge_pages_node[nid] += delta; + ret = 1; + break; + } while (nid != prev_nid); - /* - * Copy static prev_nid to local nid, work on that, then copy it - * back to prev_nid afterwards: otherwise there's a window in which - * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node. - * But we don't need to use a spin_lock here: it really doesn't - * matter if occasionally a racer chooses the same nid as we do. - */ - nid = next_node(prev_nid, node_online_map); - if (nid == MAX_NUMNODES) - nid = first_node(node_online_map); prev_nid = nid; + return ret; +} + +static struct page *alloc_fresh_huge_page_node(int nid) +{ + struct page *page; - page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN, + page = alloc_pages_node(nid, + htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN, + HUGETLB_PAGE_ORDER); + if (page) { + set_compound_page_dtor(page, free_huge_page); + spin_lock(&hugetlb_lock); + nr_huge_pages++; + nr_huge_pages_node[nid]++; + spin_unlock(&hugetlb_lock); + put_page(page); /* free it into the hugepage allocator */ + } + + return page; +} + +static int alloc_fresh_huge_page(void) +{ + struct page *page; + int start_nid; + int next_nid; + int ret = 0; + + start_nid = hugetlb_next_nid; + + do { + page = alloc_fresh_huge_page_node(hugetlb_next_nid); + if (page) + ret = 1; + /* + * Use a helper variable to find the next node and then + * copy it back to hugetlb_next_nid afterwards: + * otherwise there's a window in which a racer might + * pass invalid nid MAX_NUMNODES to alloc_pages_node. + * But we don't need to use a spin_lock here: it really + * doesn't matter if occasionally a racer chooses the + * same nid as we do. Move nid forward in the mask even + * if we just successfully allocated a hugepage so that + * the next caller gets hugepages on the next node. + */ + next_nid = next_node(hugetlb_next_nid, node_online_map); + if (next_nid == MAX_NUMNODES) + next_nid = first_node(node_online_map); + hugetlb_next_nid = next_nid; + } while (!page && hugetlb_next_nid != start_nid); + + return ret; +} + +static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma, + unsigned long address) +{ + struct page *page; + + /* Check if the dynamic pool is enabled */ + if (!hugetlb_dynamic_pool) + return NULL; + + page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN, HUGETLB_PAGE_ORDER); if (page) { set_compound_page_dtor(page, free_huge_page); spin_lock(&hugetlb_lock); nr_huge_pages++; nr_huge_pages_node[page_to_nid(page)]++; + surplus_huge_pages++; + surplus_huge_pages_node[page_to_nid(page)]++; spin_unlock(&hugetlb_lock); - put_page(page); /* free it into the hugepage allocator */ - return 1; } - return 0; + + return page; +} + +/* + * Increase the hugetlb pool such that it can accomodate a reservation + * of size 'delta'. 
+ */ +static int gather_surplus_pages(int delta) +{ + struct list_head surplus_list; + struct page *page, *tmp; + int ret, i; + int needed, allocated; + + needed = (resv_huge_pages + delta) - free_huge_pages; + if (needed <= 0) + return 0; + + allocated = 0; + INIT_LIST_HEAD(&surplus_list); + + ret = -ENOMEM; +retry: + spin_unlock(&hugetlb_lock); + for (i = 0; i < needed; i++) { + page = alloc_buddy_huge_page(NULL, 0); + if (!page) { + /* + * We were not able to allocate enough pages to + * satisfy the entire reservation so we free what + * we've allocated so far. + */ + spin_lock(&hugetlb_lock); + needed = 0; + goto free; + } + + list_add(&page->lru, &surplus_list); + } + allocated += needed; + + /* + * After retaking hugetlb_lock, we need to recalculate 'needed' + * because either resv_huge_pages or free_huge_pages may have changed. + */ + spin_lock(&hugetlb_lock); + needed = (resv_huge_pages + delta) - (free_huge_pages + allocated); + if (needed > 0) + goto retry; + + /* + * The surplus_list now contains _at_least_ the number of extra pages + * needed to accomodate the reservation. Add the appropriate number + * of pages to the hugetlb pool and free the extras back to the buddy + * allocator. + */ + needed += allocated; + ret = 0; +free: + list_for_each_entry_safe(page, tmp, &surplus_list, lru) { + list_del(&page->lru); + if ((--needed) >= 0) + enqueue_huge_page(page); + else { + /* + * Decrement the refcount and free the page using its + * destructor. This must be done with hugetlb_lock + * unlocked which is safe because free_huge_page takes + * hugetlb_lock before deciding how to free the page. + */ + spin_unlock(&hugetlb_lock); + put_page(page); + spin_lock(&hugetlb_lock); + } + } + + return ret; +} + +/* + * When releasing a hugetlb pool reservation, any surplus pages that were + * allocated to satisfy the reservation must be explicitly freed if they were + * never used. + */ +void return_unused_surplus_pages(unsigned long unused_resv_pages) +{ + static int nid = -1; + struct page *page; + unsigned long nr_pages; + + nr_pages = min(unused_resv_pages, surplus_huge_pages); + + while (nr_pages) { + nid = next_node(nid, node_online_map); + if (nid == MAX_NUMNODES) + nid = first_node(node_online_map); + + if (!surplus_huge_pages_node[nid]) + continue; + + if (!list_empty(&hugepage_freelists[nid])) { + page = list_entry(hugepage_freelists[nid].next, + struct page, lru); + list_del(&page->lru); + update_and_free_page(page); + free_huge_pages--; + free_huge_pages_node[nid]--; + surplus_huge_pages--; + surplus_huge_pages_node[nid]--; + nr_pages--; + } + } } static struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr) { - struct page *page; + struct page *page = NULL; + int use_reserved_page = vma->vm_flags & VM_MAYSHARE; spin_lock(&hugetlb_lock); - if (vma->vm_flags & VM_MAYSHARE) - resv_huge_pages--; - else if (free_huge_pages <= resv_huge_pages) + if (!use_reserved_page && (free_huge_pages <= resv_huge_pages)) goto fail; page = dequeue_huge_page(vma, addr); @@ -153,10 +372,17 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, return page; fail: - if (vma->vm_flags & VM_MAYSHARE) - resv_huge_pages++; spin_unlock(&hugetlb_lock); - return NULL; + + /* + * Private mappings do not use reserved huge pages so the allocation + * may have failed due to an undersized hugetlb pool. Try to grab a + * surplus huge page from the buddy allocator. 
+ */ + if (!use_reserved_page) + page = alloc_buddy_huge_page(vma, addr); + + return page; } static int __init hugetlb_init(void) @@ -169,6 +395,8 @@ static int __init hugetlb_init(void) for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&hugepage_freelists[i]); + hugetlb_next_nid = first_node(node_online_map); + for (i = 0; i < max_huge_pages; ++i) { if (!alloc_fresh_huge_page()) break; @@ -199,21 +427,6 @@ static unsigned int cpuset_mems_nr(unsigned int *array) } #ifdef CONFIG_SYSCTL -static void update_and_free_page(struct page *page) -{ - int i; - nr_huge_pages--; - nr_huge_pages_node[page_to_nid(page)]--; - for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) { - page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | - 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | - 1 << PG_private | 1<< PG_writeback); - } - set_compound_page_dtor(page, NULL); - set_page_refcounted(page); - __free_pages(page, HUGETLB_PAGE_ORDER); -} - #ifdef CONFIG_HIGHMEM static void try_to_free_low(unsigned long count) { @@ -222,14 +435,14 @@ static void try_to_free_low(unsigned long count) for (i = 0; i < MAX_NUMNODES; ++i) { struct page *page, *next; list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) { + if (count >= nr_huge_pages) + return; if (PageHighMem(page)) continue; list_del(&page->lru); update_and_free_page(page); free_huge_pages--; free_huge_pages_node[page_to_nid(page)]--; - if (count >= nr_huge_pages) - return; } } } @@ -239,26 +452,61 @@ static inline void try_to_free_low(unsigned long count) } #endif +#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages) static unsigned long set_max_huge_pages(unsigned long count) { - while (count > nr_huge_pages) { - if (!alloc_fresh_huge_page()) - return nr_huge_pages; - } - if (count >= nr_huge_pages) - return nr_huge_pages; + unsigned long min_count, ret; + /* + * Increase the pool size + * First take pages out of surplus state. Then make up the + * remaining difference by allocating fresh huge pages. + */ spin_lock(&hugetlb_lock); - count = max(count, resv_huge_pages); - try_to_free_low(count); - while (count < nr_huge_pages) { + while (surplus_huge_pages && count > persistent_huge_pages) { + if (!adjust_pool_surplus(-1)) + break; + } + + while (count > persistent_huge_pages) { + int ret; + /* + * If this allocation races such that we no longer need the + * page, free_huge_page will handle it by freeing the page + * and reducing the surplus. + */ + spin_unlock(&hugetlb_lock); + ret = alloc_fresh_huge_page(); + spin_lock(&hugetlb_lock); + if (!ret) + goto out; + + } + + /* + * Decrease the pool size + * First return free pages to the buddy allocator (being careful + * to keep enough around to satisfy reservations). Then place + * pages into surplus state as needed so the pool will shrink + * to the desired size as pages become free. 
+ */ + min_count = resv_huge_pages + nr_huge_pages - free_huge_pages; + min_count = max(count, min_count); + try_to_free_low(min_count); + while (min_count < persistent_huge_pages) { struct page *page = dequeue_huge_page(NULL, 0); if (!page) break; update_and_free_page(page); } + while (count < persistent_huge_pages) { + if (!adjust_pool_surplus(1)) + break; + } +out: + ret = persistent_huge_pages; spin_unlock(&hugetlb_lock); - return nr_huge_pages; + return ret; } int hugetlb_sysctl_handler(struct ctl_table *table, int write, @@ -290,10 +538,12 @@ int hugetlb_report_meminfo(char *buf) "HugePages_Total: %5lu\n" "HugePages_Free: %5lu\n" "HugePages_Rsvd: %5lu\n" + "HugePages_Surp: %5lu\n" "Hugepagesize: %5lu kB\n", nr_huge_pages, free_huge_pages, resv_huge_pages, + surplus_huge_pages, HPAGE_SIZE/1024); } @@ -353,7 +603,6 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma, entry = pte_mkwrite(pte_mkdirty(*ptep)); if (ptep_set_access_flags(vma, address, ptep, entry, 1)) { update_mmu_cache(vma, address, entry); - lazy_mmu_prot_update(entry); } } @@ -706,7 +955,6 @@ void hugetlb_change_protection(struct vm_area_struct *vma, pte = huge_ptep_get_and_clear(mm, address, ptep); pte = pte_mkhuge(pte_modify(pte, newprot)); set_huge_pte_at(mm, address, ptep, pte); - lazy_mmu_prot_update(pte); } } spin_unlock(&mm->page_table_lock); @@ -769,10 +1017,10 @@ static long region_chg(struct list_head *head, long f, long t) /* If we are below the current region then a new region is required. * Subtle, allocate a new region at the position but make it zero - * size such that we can guarentee to record the reservation. */ + * size such that we can guarantee to record the reservation. */ if (&rg->link == head || t < rg->from) { nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); - if (nrg == 0) + if (!nrg) return -ENOMEM; nrg->from = f; nrg->to = f; @@ -841,21 +1089,6 @@ static int hugetlb_acct_memory(long delta) int ret = -ENOMEM; spin_lock(&hugetlb_lock); - if ((delta + resv_huge_pages) <= free_huge_pages) { - resv_huge_pages += delta; - ret = 0; - } - spin_unlock(&hugetlb_lock); - return ret; -} - -int hugetlb_reserve_pages(struct inode *inode, long from, long to) -{ - long ret, chg; - - chg = region_chg(&inode->i_mapping->private_list, from, to); - if (chg < 0) - return chg; /* * When cpuset is configured, it breaks the strict hugetlb page * reservation as the accounting is done on a global variable. Such @@ -873,8 +1106,31 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to) * a best attempt and hopefully to minimize the impact of changing * semantics that cpuset has. */ - if (chg > cpuset_mems_nr(free_huge_pages_node)) - return -ENOMEM; + if (delta > 0) { + if (gather_surplus_pages(delta) < 0) + goto out; + + if (delta > cpuset_mems_nr(free_huge_pages_node)) + goto out; + } + + ret = 0; + resv_huge_pages += delta; + if (delta < 0) + return_unused_surplus_pages((unsigned long) -delta); + +out: + spin_unlock(&hugetlb_lock); + return ret; +} + +int hugetlb_reserve_pages(struct inode *inode, long from, long to) +{ + long ret, chg; + + chg = region_chg(&inode->i_mapping->private_list, from, to); + if (chg < 0) + return chg; ret = hugetlb_acct_memory(chg); if (ret < 0)
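
The per-node balancing in this patch relies on a simple round-robin walk of the online nodes: adjust_pool_surplus() resumes the walk after the node it used last time, and alloc_fresh_huge_page() advances hugetlb_next_nid the same way. The standalone C sketch below models that walk in userspace; it is not part of the patch, and MAX_NODES, online_mask, next_online_node() and the flat arrays are simplified stand-ins for MAX_NUMNODES, node_online_map, next_node() and the kernel's per-node counters.

/* Standalone model of the round-robin node walk; MAX_NODES, online_mask
 * and next_online_node() stand in for MAX_NUMNODES, node_online_map and
 * next_node(), and the arrays stand in for the per-node counters. */
#include <stdio.h>

#define MAX_NODES 8

static unsigned int online_mask = 0x0b;         /* nodes 0, 1 and 3 online */
static int nr_pages_node[MAX_NODES] = { 4, 4, 0, 4 };
static int surplus_pages_node[MAX_NODES];
static int surplus_pages;

/* Return the next online node after nid, wrapping around at MAX_NODES. */
static int next_online_node(int nid)
{
        do {
                nid = (nid + 1) % MAX_NODES;
        } while (!(online_mask & (1u << nid)));
        return nid;
}

/* Mirror of adjust_pool_surplus(): move one page into (+1) or out of (-1)
 * surplus state, resuming the walk after the node used last time so the
 * per-node counters stay balanced.  Returns 1 if a node was adjusted. */
static int adjust_pool_surplus(int delta)
{
        static int prev_nid;
        int nid = prev_nid;
        int ret = 0;

        do {
                nid = next_online_node(nid);
                /* To shrink on this node, there must be a surplus page. */
                if (delta < 0 && !surplus_pages_node[nid])
                        continue;
                /* Surplus cannot exceed the pages present on the node. */
                if (delta > 0 && surplus_pages_node[nid] >= nr_pages_node[nid])
                        continue;
                surplus_pages += delta;
                surplus_pages_node[nid] += delta;
                ret = 1;
                break;
        } while (nid != prev_nid);

        prev_nid = nid;
        return ret;
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                adjust_pool_surplus(1);
        for (int nid = 0; nid < MAX_NODES; nid++)
                if (online_mask & (1u << nid))
                        printf("node %d: surplus %d\n", nid, surplus_pages_node[nid]);
        printf("total surplus: %d\n", surplus_pages);
        return 0;
}

Note that the do/while condition lets the walk consider every online node, including the one it started from, before giving up.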
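
gather_surplus_pages() is the heart of the dynamic pool: hugetlb_acct_memory() calls it to top the pool up with just enough buddy-allocated surplus pages to back a new reservation, and it must recompute 'needed' after the allocation phase because hugetlb_lock is dropped while calling the page allocator. The following single-threaded sketch models only that accounting; the counters and helpers are invented stand-ins, allocation never fails here, and the partial-failure path (freeing what was already allocated before returning -ENOMEM) is left out.

/* Simplified, single-threaded model of the accounting in
 * gather_surplus_pages(): grow the pool by just enough surplus pages to
 * cover a reservation of 'delta' pages, re-checking the counters after
 * the (lock-dropping) allocation phase and returning any excess. */
#include <stdio.h>

static long resv_huge_pages;
static long free_huge_pages;
static long nr_huge_pages;
static long surplus_huge_pages;

/* Stand-in for alloc_buddy_huge_page(): pretend the buddy allocator
 * always has a huge-page-order block available. */
static int alloc_surplus_page(void)
{
        nr_huge_pages++;
        surplus_huge_pages++;
        return 1;
}

/* Stand-in for giving a surplus page straight back to the buddy
 * allocator (put_page() -> free_huge_page() in the kernel). */
static void free_surplus_page(void)
{
        nr_huge_pages--;
        surplus_huge_pages--;
}

static int gather_surplus_pages(long delta)
{
        long needed, allocated = 0;

        needed = (resv_huge_pages + delta) - free_huge_pages;
        if (needed <= 0)
                return 0;               /* existing free pages cover it */

retry:
        /* Allocation phase: in the kernel this runs without hugetlb_lock.
         * The kernel also frees partial allocations and returns -ENOMEM
         * when the buddy allocator fails; that path is omitted here. */
        for (long i = 0; i < needed; i++)
                if (!alloc_surplus_page())
                        return -1;
        allocated += needed;

        /* Re-check: resv/free may have moved while the lock was dropped. */
        needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
        if (needed > 0)
                goto retry;

        /* Keep 'allocated + needed' pages (needed is <= 0 here) in the
         * pool and give the extras back, like the list walk in the patch. */
        needed += allocated;
        free_huge_pages += needed;
        for (long extra = allocated - needed; extra > 0; extra--)
                free_surplus_page();
        return 0;
}

int main(void)
{
        free_huge_pages = nr_huge_pages = 2;    /* small boot-time pool */
        resv_huge_pages = 1;                    /* one page already reserved */

        if (gather_surplus_pages(4) == 0)       /* new reservation of 4 pages */
                printf("nr=%ld free=%ld resv=%ld surplus=%ld\n",
                       nr_huge_pages, free_huge_pages,
                       resv_huge_pages, surplus_huge_pages);
        return 0;
}

With a pool of 2 free pages, 1 page already reserved and a new reservation of 4, the model ends with nr=5 free=5 surplus=3, i.e. exactly the three extra pages needed to cover the reservation.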
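
set_max_huge_pages() now works in terms of persistent pages, defined as nr_huge_pages - surplus_huge_pages: growing first converts surplus pages into persistent ones and only then allocates fresh pages, while shrinking frees pages only down to min_count (enough for reservations plus pages currently in use) and pushes the remainder into surplus state so they drain back to the buddy allocator as they are freed. The flat-counter model below, again illustrative rather than kernel code, walks through that ordering.

/* Flat-counter model of the set_max_huge_pages() ordering introduced by
 * the patch.  All names are stand-ins for the kernel counters and the
 * per-node bookkeeping is ignored. */
#include <stdio.h>

static unsigned long nr_huge_pages = 10;
static unsigned long free_huge_pages = 6;       /* 4 pages are in use */
static unsigned long resv_huge_pages = 2;
static unsigned long surplus_huge_pages = 3;

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)

static unsigned long max(unsigned long a, unsigned long b)
{
        return a > b ? a : b;
}

static unsigned long set_max_huge_pages(unsigned long count)
{
        unsigned long min_count;

        /* Grow: absorb surplus pages first, then "allocate" fresh ones. */
        while (surplus_huge_pages && count > persistent_huge_pages)
                surplus_huge_pages--;           /* adjust_pool_surplus(-1) */
        while (count > persistent_huge_pages) {
                nr_huge_pages++;                /* alloc_fresh_huge_page() */
                free_huge_pages++;
        }

        /* Shrink: never free below in-use + reserved pages ... */
        min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
        min_count = max(count, min_count);
        while (min_count < persistent_huge_pages && free_huge_pages) {
                nr_huge_pages--;                /* update_and_free_page() */
                free_huge_pages--;
        }
        /* ... and push anything still above 'count' into surplus state. */
        while (count < persistent_huge_pages)
                surplus_huge_pages++;           /* adjust_pool_surplus(1) */

        return persistent_huge_pages;
}

int main(void)
{
        printf("persistent after grow to 12:  %lu\n", set_max_huge_pages(12));
        printf("persistent after shrink to 3: %lu\n", set_max_huge_pages(3));
        printf("nr=%lu free=%lu surplus=%lu\n",
               nr_huge_pages, free_huge_pages, surplus_huge_pages);
        return 0;
}

Shrinking from 12 persistent pages to 3 in this model stops freeing at 6 pages (4 in use plus 2 reserved) and marks the remaining 3 surplus, mirroring the lazy shrink described in the patch comment.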