err.no Git - linux-2.6/commitdiff
x86: cpa, use page pool
author	Thomas Gleixner <tglx@linutronix.de>	Sat, 9 Feb 2008 22:24:09 +0000 (23:24 +0100)
committer	Thomas Gleixner <tglx@linutronix.de>	Sat, 9 Feb 2008 22:24:09 +0000 (23:24 +0100)
Switch the split page code to use the page pool. We do this
unconditionally to avoid different behaviour with and without
DEBUG_PAGEALLOC enabled.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/mm/pageattr.c

index 831462c3bc3578e47c428de55d023ef9e62ae77c..e5d29a112d002f04898a9d32994cf1d4aefc0f17 100644
@@ -411,20 +411,29 @@ void __init cpa_init(void)
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
        unsigned long flags, pfn, pfninc = 1;
-       gfp_t gfp_flags = GFP_KERNEL;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
-#endif
-       base = alloc_pages(gfp_flags, 0);
-       if (!base)
+       /*
+        * Get a page from the pool. The pool list is protected by the
+        * pgd_lock, which we have to take anyway for the split
+        * operation:
+        */
+       spin_lock_irqsave(&pgd_lock, flags);
+       if (list_empty(&page_pool)) {
+               spin_unlock_irqrestore(&pgd_lock, flags);
                return -ENOMEM;
+       }
+
+       base = list_first_entry(&page_pool, struct page, lru);
+       list_del(&base->lru);
+       pool_pages--;
+
+       if (pool_pages < pool_low)
+               pool_low = pool_pages;
 
-       spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
@@ -469,11 +478,17 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        base = NULL;
 
 out_unlock:
+       /*
+        * If we dropped out via the lookup_address check under
+        * pgd_lock then stick the page back into the pool:
+        */
+       if (base) {
+               list_add(&base->lru, &page_pool);
+               pool_pages++;
+       } else
+               pool_used++;
        spin_unlock_irqrestore(&pgd_lock, flags);
 
-       if (base)
-               __free_pages(base, 0);
-
        return 0;
 }
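
The pattern the diff applies is worth seeing in isolation: pages are
pre-allocated into a pool from a context where blocking allocation is
safe, and split_large_page() then takes one from the pool under
pgd_lock, which it must hold for the split anyway. If the race check
finds the page already split, the untouched page goes straight back.
Below is a minimal userspace sketch of that pattern, not kernel code:
the names pool_fill, pool_get_locked and pool_put_locked are
hypothetical, a pthread mutex stands in for spin_lock_irqsave on
pgd_lock, and malloc stands in for the blocking GFP_KERNEL refill path.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pool_page {
	struct pool_page *next;
	char data[4096];
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pool_page *page_pool;	/* singly linked free list */
static int pool_pages, pool_low, pool_used;

/* Refill outside the critical path, where blocking allocation is fine. */
static void pool_fill(int count)
{
	while (count--) {
		struct pool_page *p = malloc(sizeof(*p));

		if (!p)
			break;
		pthread_mutex_lock(&pool_lock);
		p->next = page_pool;
		page_pool = p;
		pool_pages++;
		pthread_mutex_unlock(&pool_lock);
	}
	pool_low = pool_pages;
}

/* Caller already holds pool_lock for its own critical section. */
static struct pool_page *pool_get_locked(void)
{
	struct pool_page *p = page_pool;

	if (!p)
		return NULL;		/* the -ENOMEM case in the diff */
	page_pool = p->next;
	if (--pool_pages < pool_low)
		pool_low = pool_pages;	/* track worst-case pool depth */
	return p;
}

/* Lost the race? Put the untouched page back, as out_unlock does. */
static void pool_put_locked(struct pool_page *p)
{
	p->next = page_pool;
	page_pool = p;
	pool_pages++;
}

int main(void)
{
	int already_split = 0;	/* stands in for the lookup_address check */
	struct pool_page *p;

	pool_fill(4);

	pthread_mutex_lock(&pool_lock);
	p = pool_get_locked();
	if (p && already_split) {
		pool_put_locked(p);	/* another CPU split it for us */
		p = NULL;
	} else if (p) {
		pool_used++;
	}
	pthread_mutex_unlock(&pool_lock);

	printf("pages left: %d, low watermark: %d, used: %d\n",
	       pool_pages, pool_low, pool_used);
	free(p);
	return 0;
}

The design point the commit message makes is visible here: because the
pool is consumed on every call, the allocation behaviour of the split
path no longer differs between DEBUG_PAGEALLOC and normal builds.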