free_huge_pages_node[nid]++;
}
-static struct page *dequeue_huge_page(struct vm_area_struct *vma,
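+/*
+ * Pop a free huge page off the free list of the first node that has
+ * one, updating the global and per-node counters.  Must be called
+ * with hugetlb_lock held.
+ */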
+static struct page *dequeue_huge_page(void)
+{
+ int nid;
+ struct page *page = NULL;
+
+ for (nid = 0; nid < MAX_NUMNODES; ++nid) {
+ if (!list_empty(&hugepage_freelists[nid])) {
+ page = list_entry(hugepage_freelists[nid].next,
+ struct page, lru);
+ list_del(&page->lru);
+ free_huge_pages--;
+ free_huge_pages_node[nid]--;
+ break;
+ }
+ }
+ return page;
+}
+
+static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
unsigned long address)
{
int nid;
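For readers new to the list API: the list_entry() in the new helper is
container_of() by another name; it backs out from the embedded lru node to
the enclosing struct page. A standalone sketch of the same pointer
arithmetic (the types below are simplified stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's list_head / struct page. */
struct list_head { struct list_head *next, *prev; };
struct page { unsigned long flags; struct list_head lru; };

/* list_entry(ptr, type, member) is container_of(): subtract the
 * member's offset to get back to the enclosing structure. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct page pg = { .flags = 42 };
	struct list_head *node = &pg.lru;
	struct page *back = list_entry(node, struct page, lru);

	printf("flags=%lu same=%d\n", back->flags, back == &pg);
	return 0;
}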
spin_lock(&hugetlb_lock);
if (page) {
+ /*
+ * This page is now managed by the hugetlb allocator and has
+ * no users -- drop the buddy allocator's reference.
+ */
+ put_page_testzero(page);
+ VM_BUG_ON(page_count(page));
nid = page_to_nid(page);
set_compound_page_dtor(page, free_huge_page);
/*
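A note on the new reference handling above: put_page_testzero() drops one
reference and reports whether it was the last; the return value can be
ignored here because a page fresh from the buddy allocator carries exactly
one reference and has no other users, which the VM_BUG_ON() then
double-checks. A toy model of that contract (not the kernel's
implementation):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_page { atomic_int count; };

/* Model of put_page_testzero(): drop one reference, report
 * whether that was the last one. */
static int put_testzero(struct fake_page *p)
{
	return atomic_fetch_sub(&p->count, 1) == 1;
}

int main(void)
{
	/* A page fresh from the buddy allocator has one reference. */
	struct fake_page page = { .count = 1 };

	int was_last = put_testzero(&page);
	assert(was_last);                      /* no other users existed */
	assert(atomic_load(&page.count) == 0); /* the VM_BUG_ON check    */
	printf("refcount now %d\n", atomic_load(&page.count));
	return 0;
}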
enqueue_huge_page(page);
else {
/*
- * Decrement the refcount and free the page using its
- * destructor. This must be done with hugetlb_lock
+ * The page has a reference count of zero already, so
+ * call free_huge_page directly instead of using
+ * put_page. This must be done with hugetlb_lock
 * unlocked, which is safe because free_huge_page takes
* hugetlb_lock before deciding how to free the page.
*/
spin_unlock(&hugetlb_lock);
- put_page(page);
+ free_huge_page(page);
spin_lock(&hugetlb_lock);
}
}
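The unlock/relock around free_huge_page() is forced by lock ordering:
free_huge_page() acquires hugetlb_lock itself, so entering it with the
lock already held would self-deadlock on the non-recursive spinlock. The
shape of the pattern, modelled in userspace with a pthread mutex (names
illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static long free_pages;

/* Stand-in for free_huge_page(): it takes the pool lock itself,
 * so it must never be entered with that lock already held. */
static void free_to_pool(void)
{
	pthread_mutex_lock(&pool_lock);
	free_pages++;
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	pthread_mutex_lock(&pool_lock);
	/* ... found a surplus page under the lock ... */
	pthread_mutex_unlock(&pool_lock); /* drop: the callee relocks */
	free_to_pool();
	pthread_mutex_lock(&pool_lock);   /* retake and carry on */
	pthread_mutex_unlock(&pool_lock);

	printf("free_pages=%ld\n", free_pages);
	return 0;
}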
struct page *page;
spin_lock(&hugetlb_lock);
- page = dequeue_huge_page(vma, addr);
+ page = dequeue_huge_page_vma(vma, addr);
spin_unlock(&hugetlb_lock);
return page ? page : ERR_PTR(-VM_FAULT_OOM);
}
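The ERR_PTR(-VM_FAULT_OOM) return encodes the failure in the pointer
itself; callers unwrap it with IS_ERR()/PTR_ERR(). A stripped-down model
of that convention, simplified from the kernel's include/linux/err.h:

#include <stdio.h>

/* Simplified model of ERR_PTR()/IS_ERR()/PTR_ERR(): error codes
 * live in the top (never-mapped) sliver of the address space. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *page = ERR_PTR(-12); /* say, -ENOMEM */

	if (IS_ERR(page))
		printf("allocation failed: %ld\n", PTR_ERR(page));
	return 0;
}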
spin_lock(&hugetlb_lock);
if (free_huge_pages > resv_huge_pages)
- page = dequeue_huge_page(vma, addr);
+ page = dequeue_huge_page_vma(vma, addr);
spin_unlock(&hugetlb_lock);
if (!page) {
page = alloc_buddy_huge_page(vma, addr);
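The free_huge_pages > resv_huge_pages test is the reservation gate: free
pages that back an existing reservation must stay on the free lists, so an
unreserved request may only dequeue the excess and otherwise falls back to
alloc_buddy_huge_page(). A toy illustration (counts invented):

#include <stdio.h>

/* Invented counts: every free page is backed by a reservation. */
static long free_huge_pages = 2, resv_huge_pages = 2;

int main(void)
{
	if (free_huge_pages > resv_huge_pages)
		printf("dequeue a pre-allocated huge page\n");
	else
		printf("reserve would be broken: try the buddy allocator\n");
	return 0;
}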
min_count = max(count, min_count);
try_to_free_low(min_count);
while (min_count < persistent_huge_pages) {
- struct page *page = dequeue_huge_page(NULL, 0);
+ struct page *page = dequeue_huge_page();
if (!page)
break;
update_and_free_page(page);
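With the VMA argument gone, the shrink loop reads naturally: keep pulling
free pages and handing them back to the buddy allocator until the pool
reaches its floor, giving up early if nothing is free. A userspace model
of the loop (all counters invented for illustration):

#include <stdio.h>

/* Invented pool state: 8 persistent pages, 6 of them free. */
static unsigned long persistent_huge_pages = 8;
static unsigned long free_huge_pages = 6;

/* Model of dequeue_huge_page(): take one free page if any. */
static int dequeue_one(void)
{
	if (!free_huge_pages)
		return 0;
	free_huge_pages--;
	return 1;
}

/* Model of update_and_free_page(): the page leaves the pool. */
static void update_and_free_one(void)
{
	persistent_huge_pages--;
}

int main(void)
{
	unsigned long count = 3, min_count = 5;

	/* The floor: never shrink below reserved or in-use pages. */
	min_count = count > min_count ? count : min_count;

	while (min_count < persistent_huge_pages) {
		if (!dequeue_one())  /* nothing free: stop early */
			break;
		update_and_free_one();
	}
	printf("pool: %lu persistent, %lu free\n",
	       persistent_huge_pages, free_huge_pages);
	return 0;
}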