/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
/* List of PBEs used for creating and restoring the suspend image */
struct pbe *restore_pblist;

static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;
#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;

				if (!pfn_valid(pfn))
					continue;

				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}
struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;
static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;

	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}
int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem\n");
	drain_local_pages();
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	return 0;
}
int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;

		highmem_copy = save->next;
		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#else
static inline unsigned int count_highmem_pages(void) {return 0;}
static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif /* CONFIG_HIGHMEM */
/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *	and we count them using unsafe_pages.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int unsafe_pages;
static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, PG_SAFE);
}
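
/*
 * Example (illustrative sketch): a caller restoring an image, such as
 * architecture code building temporary page tables, can use this to get
 * a page frame that cannot collide with the loaded image data:
 *
 *	unsigned long addr = get_safe_page(GFP_ATOMIC);
 *	if (!addr)
 *		return -ENOMEM;
 *
 * The page comes back zeroed and flagged, so swsusp_free() reclaims it.
 */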
/**
 *	free_image_page - free page represented by @addr, allocated with
 *	alloc_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	ClearPageNosave(virt_to_page(addr));
	if (clear_nosave_free)
		ClearPageNosaveFree(virt_to_page(addr));
	free_page((unsigned long)addr);
}
/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));
static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
/**
 *	struct chain_allocator is used for allocating small objects out of
 *	a linked list of pages called 'the chain'.
 *
 *	The chain grows each time when there is no room for a new object in
 *	the current page. The allocated objects cannot be freed individually.
 *	It is only possible to free them all at once, by freeing the entire
 *	chain.
 *
 *	NOTE: The chain allocator may be inefficient if the allocated objects
 *	are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};
static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = alloc_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
{
	free_list_of_pages(ca->chain, clear_page_nosave);
	memset(ca, 0, sizeof(struct chain_allocator));
}
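
/*
 * Example (illustrative sketch): how a caller, such as memory_bm_create()
 * below, carves small objects out of a chain.  Objects cannot be freed
 * one by one; chain_free() releases them all at once:
 *
 *	struct chain_allocator ca;
 *	struct bm_block *bb;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	bb = chain_alloc(&ca, sizeof(struct bm_block));
 *	if (!bb)
 *		return -ENOMEM;
 *	...
 *	chain_free(&ca, PG_UNSAFE_CLEAR);
 */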
/**
 *	Data types related to memory bitmaps.
 *
 *	Memory bitmap is a structure consisting of many linked lists of
 *	objects. The main list's elements are of type struct zone_bitmap
 *	and each of them corresponds to one zone. For each zone bitmap
 *	object there is a list of objects of type struct bm_block that
 *	represent each block of bit chunks in which information is
 *	stored.
 *
 *	struct memory_bitmap contains a pointer to the main list of zone
 *	bitmap objects, a struct bm_position used for browsing the bitmap,
 *	and a pointer to the list of pages used for allocating all of the
 *	zone bitmap objects and bitmap block objects.
 *
 *	NOTE: It has to be possible to lay out the bitmap in memory
 *	using only allocations of order 0. Additionally, the bitmap is
 *	designed to work with arbitrary number of zones (this is over the
 *	top for now, but let's avoid making unnecessary assumptions ;-).
 *
 *	struct zone_bitmap contains a pointer to a list of bitmap block
 *	objects and a pointer to the bitmap block object that has been
 *	most recently used for setting bits. Additionally, it contains the
 *	pfns that correspond to the start and end of the represented zone.
 *
 *	struct bm_block contains a pointer to the memory page in which
 *	information is stored (in the form of a block of bit chunks
 *	of type unsigned long each). It also contains the pfns that
 *	correspond to the start and end of the represented memory area and
 *	the number of bit chunks in the block.
 *
 *	NOTE: Memory bitmaps are used for two types of operations only:
 *	"set a bit" and "find the next bit set". Moreover, the searching
 *	is always carried out after all of the "set a bit" operations
 *	on given bitmap.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_CHUNKS_PER_BLOCK	(PAGE_SIZE / sizeof(long))
#define BM_BITS_PER_CHUNK	(sizeof(long) << 3)
#define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)
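
/*
 * For instance, with 4 KiB pages and 8-byte longs (other configurations
 * differ): BM_CHUNKS_PER_BLOCK = 4096 / 8 = 512, BM_BITS_PER_CHUNK =
 * 8 << 3 = 64, and BM_BITS_PER_BLOCK = 4096 << 3 = 32768, so one
 * bm_block data page represents 32768 page frames (128 MiB of memory).
 */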
struct bm_block {
	struct bm_block *next;		/* next element of the list */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
	unsigned int size;	/* number of bit chunks */
	unsigned long *data;	/* chunks of bits representing pages */
};
struct zone_bitmap {
	struct zone_bitmap *next;	/* next element of the list */
	unsigned long start_pfn;	/* minimal pfn in this zone */
	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
	struct bm_block *bm_blocks;	/* list of bitmap blocks */
	struct bm_block *cur_block;	/* recently used bitmap block */
};
/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct zone_bitmap *zone_bm;
	struct bm_block *block;
	int chunk;
	int bit;
};
struct memory_bitmap {
	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};
/* Functions that operate on memory bitmaps */

static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
{
	bm->cur.chunk = 0;
	bm->cur.bit = -1;
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;

	zone_bm = bm->zone_bm_list;
	bm->cur.zone_bm = zone_bm;
	bm->cur.block = zone_bm->bm_blocks;
	memory_bm_reset_chunk(bm);
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
/**
 *	create_bm_block_list - create a list of block bitmap objects
 */

static inline struct bm_block *
create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
{
	struct bm_block *bblist = NULL;

	while (nr_blocks-- > 0) {
		struct bm_block *bb;

		bb = chain_alloc(ca, sizeof(struct bm_block));
		if (!bb)
			return NULL;

		bb->next = bblist;
		bblist = bb;
	}
	return bblist;
}
/**
 *	create_zone_bm_list - create a list of zone bitmap objects
 */

static inline struct zone_bitmap *
create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
{
	struct zone_bitmap *zbmlist = NULL;

	while (nr_zones-- > 0) {
		struct zone_bitmap *zbm;

		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
		if (!zbm)
			return NULL;

		zbm->next = zbmlist;
		zbmlist = zbm;
	}
	return zbmlist;
}
/**
 *	memory_bm_create - allocate memory for a memory bitmap
 */

static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct zone *zone;
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	unsigned int nr;

	chain_init(&ca, gfp_mask, safe_needed);

	/* Compute the number of zones */
	nr = 0;
	for_each_zone (zone)
		if (populated_zone(zone) && !is_highmem(zone))
			nr++;

	/* Allocate the list of zones bitmap objects */
	zone_bm = create_zone_bm_list(nr, &ca);
	bm->zone_bm_list = zone_bm;
	if (!zone_bm) {
		chain_free(&ca, PG_UNSAFE_CLEAR);
		return -ENOMEM;
	}

	/* Initialize the zone bitmap objects */
	for_each_zone (zone) {
		unsigned long pfn;

		if (!populated_zone(zone) || is_highmem(zone))
			continue;

		zone_bm->start_pfn = zone->zone_start_pfn;
		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
		/* Allocate the list of bitmap block objects */
		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
		bb = create_bm_block_list(nr, &ca);
		zone_bm->bm_blocks = bb;
		zone_bm->cur_block = bb;
		if (!bb)
			goto Free;

		nr = zone->spanned_pages;
		pfn = zone->zone_start_pfn;
		/* Initialize the bitmap block objects */
		while (bb) {
			unsigned long *ptr;

			ptr = alloc_image_page(gfp_mask, safe_needed);
			bb->data = ptr;
			if (!ptr)
				goto Free;

			bb->start_pfn = pfn;
			if (nr >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				bb->size = BM_CHUNKS_PER_BLOCK;
				nr -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += nr;
				bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
			}
			bb->end_pfn = pfn;
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
	return 0;

Free:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	return -ENOMEM;
}
/**
 *	memory_bm_free - free memory occupied by the memory bitmap @bm
 */

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct zone_bitmap *zone_bm;

	/* Free the list of bit blocks for each zone_bitmap object */
	zone_bm = bm->zone_bm_list;
	while (zone_bm) {
		struct bm_block *bb;

		bb = zone_bm->bm_blocks;
		while (bb) {
			if (bb->data)
				free_image_page(bb->data, clear_nosave_free);
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	free_list_of_pages(bm->p_list, clear_nosave_free);
	bm->zone_bm_list = NULL;
}
/**
 *	memory_bm_set_bit - set the bit in the bitmap @bm that corresponds
 *	to given pfn. The cur_zone_bm member of @bm and the cur_block member
 *	of @bm->cur_zone_bm are updated.
 *
 *	If the bit cannot be set, the function returns -EINVAL.
 */

static int
memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;

	/* Check if the pfn is from the current zone */
	zone_bm = bm->cur.zone_bm;
	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
		zone_bm = bm->zone_bm_list;
		/* We don't assume that the zones are sorted by pfns */
		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
			zone_bm = zone_bm->next;
			if (unlikely(!zone_bm))
				return -EINVAL;
		}
		bm->cur.zone_bm = zone_bm;
	}
	/* Check if the pfn corresponds to the current bitmap block */
	bb = zone_bm->cur_block;
	if (pfn < bb->start_pfn)
		bb = zone_bm->bm_blocks;

	while (pfn >= bb->end_pfn) {
		bb = bb->next;
		if (unlikely(!bb))
			return -EINVAL;
	}
	zone_bm->cur_block = bb;
	pfn -= bb->start_pfn;
	set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK);
	return 0;
}
/* Two auxiliary functions for memory_bm_next_pfn */

/* Find the first set bit in the given chunk, if there is one */

static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
{
	bit++;
	while (bit < BM_BITS_PER_CHUNK) {
		if (test_bit(bit, chunk_p))
			return bit;

		bit++;
	}
	return -1;
}

/* Find a chunk containing some bits set in given block of bits */

static inline int next_chunk_in_block(int n, struct bm_block *bb)
{
	n++;
	while (n < bb->size) {
		if (bb->data[n])
			return n;

		n++;
	}
	return -1;
}
/**
 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 *	in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
 *	returned.
 *
 *	It is required to run memory_bm_position_reset() before the first call
 *	to this function.
 */

static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	int chunk;
	int bit;

	do {
		bb = bm->cur.block;
		do {
			chunk = bm->cur.chunk;
			bit = bm->cur.bit;
			do {
				bit = next_bit_in_chunk(bit, bb->data + chunk);
				if (bit >= 0)
					goto Return_pfn;

				chunk = next_chunk_in_block(chunk, bb);
				bit = -1;
			} while (chunk >= 0);
			bb = bb->next;
			bm->cur.block = bb;
			memory_bm_reset_chunk(bm);
		} while (bb);
		zone_bm = bm->cur.zone_bm->next;
		if (zone_bm) {
			bm->cur.zone_bm = zone_bm;
			bm->cur.block = zone_bm->bm_blocks;
			memory_bm_reset_chunk(bm);
		}
	} while (zone_bm);
	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

Return_pfn:
	bm->cur.chunk = chunk;
	bm->cur.bit = bit;
	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
}
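
/*
 * Example (illustrative sketch): the two supported operations in their
 * required order -- populate the bitmap first, then scan it:
 *
 *	memory_bm_set_bit(&bm, pfn);	(repeated for each pfn of interest)
 *	...
 *	memory_bm_position_reset(&bm);
 *	while ((pfn = memory_bm_next_pfn(&bm)) != BM_END_OF_MAP)
 *		...process pfn...
 */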
/**
 *	snapshot_additional_pages - estimate the number of additional pages
 *	needed for setting up the suspend image data structures for given
 *	zone (usually the returned value is greater than the exact number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
	return res;
}
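
/*
 * Worked example (sizes assume the 64-bit layout sketched above): a zone
 * spanning 1 GiB, i.e. 262144 frames of 4 KiB, needs 262144 / 32768 = 8
 * bitmap data pages; the 8 struct bm_block objects (about 40 bytes each)
 * fit in one further page, giving an estimate of 9 additional pages.
 */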
/**
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

static inline int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
/**
 *	saveable_page - Determine whether a page should be cloned or not.
 *	@pfn:	The page frame number.
 *
 *	We save a page if it isn't Nosave, is not in the range of pages
 *	statically defined as 'unsaveable', and isn't part of a free chunk
 *	of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (PageNosave(page))
		return NULL;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;
	if (PageNosaveFree(page))
		return NULL;

	return page;
}
unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			n += !!saveable_page(pfn);
	}
	return n;
}
static inline void copy_data_page(long *dst, long *src)
{
	int n;

	/* copy_page and memcpy are not usable for copying task structs. */
	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_zone (zone) {
		unsigned long max_zone_pfn;

		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	do {
		pfn = memory_bm_next_pfn(orig_bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			struct page *page;
			void *src;

			page = pfn_to_page(pfn);
			src = page_address(page);
			page = pfn_to_page(memory_bm_next_pfn(copy_bm));
			copy_data_page(page_address(page), src);
		}
	} while (pfn != BM_END_OF_MAP);
}
/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		free_image_page(pblist, clear_nosave_free);
		pblist = pbe;
	}
}
/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage, unsigned int n)
{
	struct pbe *p;

	p = pbpage;
	pbpage += n - 1;
	do
		p->next = p + 1;
	while (++p < pbpage);
}
/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 *
 *	This function assumes that pages allocated by alloc_image_page() will
 *	always be zeroed.
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage, PBES_PER_PAGE);
		num += PBES_PER_PAGE;
	}
	/* Link only the PBEs that are actually used on the last page */
	num -= PBES_PER_PAGE;
	fill_pb_page(pbpage, nr_pages - num);
}
/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct pbe elements.
 */

static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
				 int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	pbe = pblist;
	for (num = PBES_PER_PAGE; num < nr_pages; num += PBES_PER_PAGE) {
		if (!pbe) {
			free_pagedir(pblist, PG_UNSAFE_CLEAR);
			return NULL;
		}
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
		pbe = pbe->next;
	}
	if (!pbe) {	/* get_zeroed_page() failed */
		free_pagedir(pblist, PG_UNSAFE_CLEAR);
		return NULL;
	}
	create_pbe_list(pblist, nr_pages);
	return pblist;
}
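
/*
 * Layout sketch (sizes assume 64-bit): struct pbe holds three words
 * (address, orig_address, next), so PBES_PER_PAGE = 4096 / 24 = 170 and
 * an image of, say, 1000 pages needs a pagedir chain of six pages linked
 * through the pbe at index PB_PAGE_SKIP of each page.
 */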
/**
 *	Free pages we allocated for suspend. Suspend pages are allocated
 *	before atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
}
/**
 *	enough_free_mem - Make sure we have enough free memory for the
 *	snapshot image.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
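
/*
 * In other words, the budget is one free frame per copied page, a
 * PAGES_FOR_IO reserve for the I/O path, plus the pagedir overhead.
 * E.g. for nr_pages = 100000 with the 170-PBE pages sketched above,
 * that is 100000 + PAGES_FOR_IO + 589 free pages.
 */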
static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages)
{
	int error;

	error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	while (nr_pages-- > 0) {
		struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD);
		if (!page)
			goto Free;

		SetPageNosave(page);
		SetPageNosaveFree(page);
		memory_bm_set_bit(copy_bm, page_to_pfn(page));
	}
	return 0;

Free:
	swsusp_free();
	return -ENOMEM;
}
/* Memory bitmap used for marking saveable pages */
static struct memory_bitmap orig_bm;
/* Memory bitmap used for marking allocated pages that will contain the copies
 * of saveable pages
 */
static struct memory_bitmap copy_bm;
asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section: \n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages))
		return -ENOMEM;

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}
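
/*
 * A worked example for the nr_meta_pages computation above: the metadata
 * is one address word per copied page, so with 8-byte longs and 4 KiB
 * pages one meta page covers 512 copied pages, and an image of 100000
 * pages needs nr_meta_pages = 196.
 */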
static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}
/**
 *	pack_addresses - the addresses corresponding to pfns found in the
 *	bitmap @bm are stored in the array @buf[] (1 page)
 */

static inline void
pack_addresses(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		unsigned long pfn = memory_bm_next_pfn(bm);

		if (unlikely(pfn == BM_END_OF_MAP))
			break;

		buf[j] = (unsigned long)page_address(pfn_to_page(pfn));
	}
}
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure. The structure gets updated and a pointer
 *	to it should be passed to this function on every subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot. It must not be zero.
 *
 *	On success the function returns a positive number. Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro. The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of the data stream
 *	condition, and a negative number is returned on error. In such
 *	cases the structure pointed to by @handle is not updated and
 *	should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			memset(buffer, 0, PAGE_SIZE);
			pack_addresses(buffer, &orig_bm);
		} else {
			unsigned long pfn = memory_bm_next_pfn(&copy_bm);

			handle->buffer = page_address(pfn_to_page(pfn));
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else
		handle->cur_offset += count;
	handle->offset += count;
	return count;
}
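
/*
 * Example (illustrative sketch): how an image writer might drain the
 * snapshot; write_page() stands in for a hypothetical consumer:
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *		write_page(data_of(handle), ret);
 *
 * Afterwards ret is 0 at the end of the image or a negative error code.
 */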
/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	unsafe_pages = 0;

	return 0;
}
static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}
static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname, system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release, system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version, system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine, system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
/**
 *	load_header - check the image header and copy data from it
 */

static int load_header(struct snapshot_handle *handle,
                       struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, PG_ANY);
		if (!pblist)
			return -ENOMEM;

		restore_pblist = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
						struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}
/**
 *	prepare_image - use metadata contained in the PBE list
 *	pointed to by restore_pblist to mark the pages that will
 *	be overwritten in the process of restoring the system
 *	memory state from the image ("unsafe" pages) and allocate
 *	memory for the image
 *
 *	The idea is to allocate the PBE list first and then
 *	allocate as many pages as needed for the image data,
 *	but not to assign these pages to the PBEs initially.
 *	Instead, we just mark them as allocated and create a list
 *	of "safe" pages which will be used later.
 */

static struct linked_page *safe_pages;

static int prepare_image(struct snapshot_handle *handle)
{
	int error = 0;
	unsigned int nr_pages = nr_copy_pages;
	struct pbe *p, *pblist = NULL;

	p = restore_pblist;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, PG_SAFE);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p, PG_UNSAFE_KEEP);
		if (!pblist)
			error = -ENOMEM;
	}
	safe_pages = NULL;
	if (!error && nr_pages > unsafe_pages) {
		nr_pages -= unsafe_pages;
		while (nr_pages--) {
			struct linked_page *ptr;

			ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (!ptr) {
				error = -ENOMEM;
				break;
			}
			if (!PageNosaveFree(virt_to_page(ptr))) {
				/* The page is "safe", add it to the list */
				ptr->next = safe_pages;
				safe_pages = ptr;
			}
			/* Mark the page as allocated */
			SetPageNosave(virt_to_page(ptr));
			SetPageNosaveFree(virt_to_page(ptr));
		}
	}
	if (!error) {
		restore_pblist = pblist;
	} else {
		handle->pbe = NULL;
		swsusp_free();
	}
	return error;
}
static void *get_buffer(struct snapshot_handle *handle)
{
	struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
	struct page *page = virt_to_page(pbe->orig_address);

	if (PageNosave(page) && PageNosaveFree(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the read page
		 */
		pbe->address = 0;
		if (last && last->next)
			last->next = NULL;
		return (void *)pbe->orig_address;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the read page
	 */
	pbe->address = (unsigned long)safe_pages;
	safe_pages = safe_pages->next;
	if (last)
		last->next = pbe;
	handle->last_pbe = pbe;
	return (void *)pbe->address;
}
/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure. The structure gets updated and a pointer
 *	to it should be passed to this function on every subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image. It must not be zero.
 *
 *	On success the function returns a positive number. Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro. The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error. In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */
int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (!handle->prev) {
			error = load_header(handle,
					(struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer,
							handle->pbe);
			if (!handle->pbe) {
				error = prepare_image(handle);
				if (error)
					return error;

				handle->pbe = restore_pblist;
				handle->last_pbe = NULL;
				handle->buffer = get_buffer(handle);
				handle->sync_read = 0;
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = get_buffer(handle);
			handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else
		handle->cur_offset += count;
	handle->offset += count;
	return count;
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->cur <= nr_meta_pages + nr_copy_pages);
}
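
/*
 * Example (illustrative sketch): the restore-side counterpart of the
 * read loop above; read_page() stands in for a hypothetical producer
 * that fills the handle's buffer:
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	do {
 *		ret = snapshot_write_next(&handle, PAGE_SIZE);
 *		if (ret > 0)
 *			ret = read_page(data_of(handle), ret);
 *	} while (ret > 0);
 *	if (ret >= 0 && !snapshot_image_loaded(&handle))
 *		ret = -ENODATA;
 */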