Hibernation: Handle DEBUG_PAGEALLOC on x86
author     Rafael J. Wysocki <rjw@sisk.pl>
           Wed, 20 Feb 2008 00:47:44 +0000 (01:47 +0100)
committer  Len Brown <len.brown@intel.com>
           Thu, 21 Feb 2008 07:15:28 +0000 (02:15 -0500)
Make hibernation work with CONFIG_DEBUG_PAGEALLOC set on x86, by
checking if the pages to be copied are marked as present in the
kernel mapping and temporarily marking them as present if that's not
the case.  No functional modifications are introduced if
CONFIG_DEBUG_PAGEALLOC is unset.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Len Brown <len.brown@intel.com>
arch/x86/mm/pageattr.c
include/linux/mm.h
kernel/power/snapshot.c
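
In outline, the patch adds an x86 helper, kernel_page_present(), which inspects the kernel page tables to tell whether a page is currently mapped, and a safe_copy_page() wrapper in the snapshot code that temporarily maps an absent page around the copy. The following is a condensed, lightly commented restatement of those two helpers; the authoritative versions are in the hunks below.

/* arch/x86/mm/pageattr.c: is this page mapped in the kernel page tables? */
bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	/* Highmem pages have no permanent kernel mapping to inspect. */
	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return pte_val(*pte) & _PAGE_PRESENT;
}

/*
 * kernel/power/snapshot.c: copy a page, mapping it first if
 * CONFIG_DEBUG_PAGEALLOC has removed it from the kernel mapping.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);		/* map temporarily */
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);		/* unmap again */
	}
}

With CONFIG_DEBUG_PAGEALLOC unset, include/linux/mm.h provides a static inline kernel_page_present() stub that always returns true, so safe_copy_page() compiles down to a plain do_copy_page() and the previous behaviour is preserved.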

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e2a74ea11a5309ec4e8181f26bb8b62271ec0d15..464d8fc21ce69b67d8d8bf057f5a57ef6a84c677 100644
@@ -899,7 +899,24 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
         */
        cpa_fill_pool();
 }
-#endif
+
+#ifdef CONFIG_HIBERNATION
+
+bool kernel_page_present(struct page *page)
+{
+       unsigned int level;
+       pte_t *pte;
+
+       if (PageHighMem(page))
+               return false;
+
+       pte = lookup_address((unsigned long)page_address(page), &level);
+       return (pte_val(*pte) & _PAGE_PRESENT);
+}
+
+#endif /* CONFIG_HIBERNATION */
+
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 /*
  * The testcases use internal knowledge of the implementation that shouldn't
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 26c7124b841aee0dba64cb81e3d54bed248c0282..3b3e1341163f3075321b487362dc8bba0bab91f3 100644
@@ -1171,12 +1171,18 @@ static inline void enable_debug_pagealloc(void)
 {
        debug_pagealloc_enabled = 1;
 }
+#ifdef CONFIG_HIBERNATION
+extern bool kernel_page_present(struct page *page);
+#endif /* CONFIG_HIBERNATION */
 #else
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable) {}
 static inline void enable_debug_pagealloc(void)
 {
 }
+#ifdef CONFIG_HIBERNATION
+static inline bool kernel_page_present(struct page *page) { return true; }
+#endif /* CONFIG_HIBERNATION */
 #endif
 
 extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 95250d7c8d91db36593fc3707b10c99a8370dc62..72a020cabb4c5238f395c1aa6420a630dc8b49dc 100644
@@ -875,8 +875,8 @@ static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
 #endif /* CONFIG_HIGHMEM */
 
 /**
- *     saveable - Determine whether a non-highmem page should be included in
- *     the suspend image.
+ *     saveable_page - Determine whether a non-highmem page should be included
+ *     in the suspend image.
  *
  *     We should save the page if it isn't Nosave, and is not in the range
  *     of pages statically defined as 'unsaveable', and it isn't a part of
@@ -897,7 +897,8 @@ static struct page *saveable_page(unsigned long pfn)
        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
                return NULL;
 
-       if (PageReserved(page) && pfn_is_nosave(pfn))
+       if (PageReserved(page)
+           && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
                return NULL;
 
        return page;
@@ -938,6 +939,25 @@ static inline void do_copy_page(long *dst, long *src)
                *dst++ = *src++;
 }
 
+
+/**
+ *     safe_copy_page - check if the page we are going to copy is marked as
+ *             present in the kernel page tables (this always is the case if
+ *             CONFIG_DEBUG_PAGEALLOC is not set and in that case
+ *             kernel_page_present() always returns 'true').
+ */
+static void safe_copy_page(void *dst, struct page *s_page)
+{
+       if (kernel_page_present(s_page)) {
+               do_copy_page(dst, page_address(s_page));
+       } else {
+               kernel_map_pages(s_page, 1, 1);
+               do_copy_page(dst, page_address(s_page));
+               kernel_map_pages(s_page, 1, 0);
+       }
+}
+
+
 #ifdef CONFIG_HIGHMEM
 static inline struct page *
 page_is_saveable(struct zone *zone, unsigned long pfn)
@@ -946,8 +966,7 @@ page_is_saveable(struct zone *zone, unsigned long pfn)
                        saveable_highmem_page(pfn) : saveable_page(pfn);
 }
 
-static inline void
-copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 {
        struct page *s_page, *d_page;
        void *src, *dst;
@@ -961,29 +980,26 @@ copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
                kunmap_atomic(src, KM_USER0);
                kunmap_atomic(dst, KM_USER1);
        } else {
-               src = page_address(s_page);
                if (PageHighMem(d_page)) {
                        /* Page pointed to by src may contain some kernel
                         * data modified by kmap_atomic()
                         */
-                       do_copy_page(buffer, src);
+                       safe_copy_page(buffer, s_page);
                        dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
                        memcpy(dst, buffer, PAGE_SIZE);
                        kunmap_atomic(dst, KM_USER0);
                } else {
-                       dst = page_address(d_page);
-                       do_copy_page(dst, src);
+                       safe_copy_page(page_address(d_page), s_page);
                }
        }
 }
 #else
 #define page_is_saveable(zone, pfn)    saveable_page(pfn)
 
-static inline void
-copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 {
-       do_copy_page(page_address(pfn_to_page(dst_pfn)),
-                       page_address(pfn_to_page(src_pfn)));
+       safe_copy_page(page_address(pfn_to_page(dst_pfn)),
+                               pfn_to_page(src_pfn));
 }
 #endif /* CONFIG_HIGHMEM */