From 2fc39cec6a9b5b41727d3386b780b69422a15152 Mon Sep 17 00:00:00 2001
From: Adam Litke
Date: Wed, 14 Nov 2007 16:59:39 -0800
Subject: [PATCH] hugetlb: debit quota in alloc_huge_page

Now that quota is credited by free_huge_page(), calls to hugetlb_get_quota()
seem out of place.  The alloc/free API is unbalanced because we handle the
hugetlb_put_quota() but expect the caller to open-code hugetlb_get_quota().
Move the get inside alloc_huge_page to clean up this disparity.

This patch has been kept apart from the previous patch because of the
somewhat dodgy ERR_PTR() use herein.  Moving the quota logic means that
alloc_huge_page() has two failure modes.  Quota failure must result in a
SIGBUS while a standard allocation failure is OOM.  Unfortunately, ERR_PTR()
doesn't like the small positive errnos we have in VM_FAULT_* so they must be
negated before they are used.

Does anyone take issue with the way I am using PTR_ERR?  If so, what are your
thoughts on how to clean this up (without needing an if, else if, else block
at each alloc_huge_page() callsite)?

Signed-off-by: Adam Litke
Cc: Ken Chen
Cc: Andy Whitcroft
Cc: Dave Hansen
Cc: David Gibson
Cc: William Lee Irwin III
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/hugetlb.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3992bd5120..bc12b0adfa 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -388,6 +388,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 						unsigned long addr)
 {
 	struct page *page;
+	struct address_space *mapping = vma->vm_file->f_mapping;
+
+	if (hugetlb_get_quota(mapping))
+		return ERR_PTR(-VM_FAULT_SIGBUS);
 
 	if (vma->vm_flags & VM_MAYSHARE)
 		page = alloc_huge_page_shared(vma, addr);
@@ -395,9 +399,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = alloc_huge_page_private(vma, addr);
 	if (page) {
 		set_page_refcounted(page);
-		set_page_private(page, (unsigned long) vma->vm_file->f_mapping);
-	}
-	return page;
+		set_page_private(page, (unsigned long) mapping);
+		return page;
+	} else
+		return ERR_PTR(-VM_FAULT_OOM);
 }
 
 static int __init hugetlb_init(void)
@@ -737,15 +742,13 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_huge_ptep_writable(vma, address, ptep);
 		return 0;
 	}
-	if (hugetlb_get_quota(vma->vm_file->f_mapping))
-		return VM_FAULT_SIGBUS;
 
 	page_cache_get(old_page);
 	new_page = alloc_huge_page(vma, address);
 
-	if (!new_page) {
+	if (IS_ERR(new_page)) {
 		page_cache_release(old_page);
-		return VM_FAULT_OOM;
+		return -PTR_ERR(new_page);
 	}
 
 	spin_unlock(&mm->page_table_lock);
@@ -789,12 +792,9 @@ retry:
 		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
 		if (idx >= size)
 			goto out;
-		if (hugetlb_get_quota(mapping))
-			goto out;
 		page = alloc_huge_page(vma, address);
-		if (!page) {
-			hugetlb_put_quota(mapping);
-			ret = VM_FAULT_OOM;
+		if (IS_ERR(page)) {
+			ret = -PTR_ERR(page);
 			goto out;
 		}
 		clear_huge_page(page, address);
-- 
2.39.5
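
For readers unfamiliar with the ERR_PTR() convention used above, here is a
standalone userspace sketch of the encoding trick.  It is not kernel code:
fake_alloc_huge_page() is a hypothetical stand-in for alloc_huge_page(), and
the ERR_PTR/IS_ERR/PTR_ERR helpers and VM_FAULT_* values below are simplified
versions of the kernel definitions, kept only to show why the small positive
VM_FAULT_* codes are negated going into ERR_PTR() and negated back with
-PTR_ERR() at the callsite.

/*
 * Userspace sketch (not kernel code) of the ERR_PTR() encoding.
 * The helpers and VM_FAULT_* values are simplified stand-ins.
 */
#include <stdio.h>

#define MAX_ERRNO	4095
#define VM_FAULT_OOM	0x0001	/* small positive codes, not errnos */
#define VM_FAULT_SIGBUS	0x0002

static inline void *ERR_PTR(long error)
{
	/* A small negative value cast to a pointer lands in the last
	 * page of the address space, which never holds a real object. */
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for alloc_huge_page(): quota failure vs. OOM. */
static void *fake_alloc_huge_page(int quota_exceeded)
{
	if (quota_exceeded)
		return ERR_PTR(-VM_FAULT_SIGBUS);	/* negate on the way in */
	return ERR_PTR(-VM_FAULT_OOM);
}

int main(void)
{
	void *page = fake_alloc_huge_page(1);

	if (IS_ERR(page))
		/* negate again to recover the positive VM_FAULT_* code */
		printf("fault = %ld\n", -PTR_ERR(page));
	return 0;
}

A single IS_ERR()/-PTR_ERR() pair at each callsite is what lets the
hugetlb_cow() and hugetlb_no_page() hunks above propagate either failure
mode without the if, else if, else block mentioned in the question.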