/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;
/**
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend.  The unsafe pages have PageNosaveFree set
 * and we count them using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree
 * so that swsusp_free() can release it.
 */

#define PG_ANY            0
#define PG_SAFE           1
#define PG_UNSAFE_CLEAR   1
#define PG_UNSAFE_KEEP    0
static unsigned int allocated_unsafe_pages;

static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
        void *res;

        res = (void *)get_zeroed_page(gfp_mask);
        if (safe_needed)
                while (res && PageNosaveFree(virt_to_page(res))) {
                        /* The page is unsafe, mark it for swsusp_free() */
                        SetPageNosave(virt_to_page(res));
                        allocated_unsafe_pages++;
                        res = (void *)get_zeroed_page(gfp_mask);
                }
        if (res) {
                SetPageNosave(virt_to_page(res));
                SetPageNosaveFree(virt_to_page(res));
        }
        return res;
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
        return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}
static struct page *alloc_image_page(gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_page(gfp_mask);
        if (page) {
                SetPageNosave(page);
                SetPageNosaveFree(page);
        }
        return page;
}
/**
 * free_image_page - free page represented by @addr, allocated with
 * get_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
        struct page *page;

        BUG_ON(!virt_addr_valid(addr));

        page = virt_to_page(addr);

        ClearPageNosave(page);
        if (clear_nosave_free)
                ClearPageNosaveFree(page);

        __free_page(page);
}
/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE   (PAGE_SIZE - sizeof(void *))

struct linked_page {
        struct linked_page *next;
        char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));
static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
        while (list) {
                struct linked_page *lp = list->next;

                free_image_page(list, clear_page_nosave);
                list = lp;
        }
}
/**
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
        struct linked_page *chain;      /* the chain */
        unsigned int used_space;        /* total size of objects allocated out
                                         * of the current page
                                         */
        gfp_t gfp_mask;         /* mask for allocating pages */
        int safe_needed;        /* if set, only "safe" pages are allocated */
};
static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
        ca->chain = NULL;
        ca->used_space = LINKED_PAGE_DATA_SIZE;
        ca->gfp_mask = gfp_mask;
        ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
        void *ret;

        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                struct linked_page *lp;

                lp = get_image_page(ca->gfp_mask, ca->safe_needed);
                if (!lp)
                        return NULL;

                lp->next = ca->chain;
                ca->chain = lp;
                ca->used_space = 0;
        }
        ret = ca->chain->data + ca->used_space;
        ca->used_space += size;
        return ret;
}
static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
{
        free_list_of_pages(ca->chain, clear_page_nosave);
        memset(ca, 0, sizeof(struct chain_allocator));
}
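
/* Illustrative use of the chain allocator (a sketch only; the surrounding
 * function and the choice of GFP_ATOMIC are hypothetical, the symbols are
 * the ones defined above):
 *
 *      struct chain_allocator ca;
 *      struct pbe *p;
 *
 *      chain_init(&ca, GFP_ATOMIC, PG_ANY);
 *      p = chain_alloc(&ca, sizeof(struct pbe));
 *      if (!p)
 *              return -ENOMEM;
 *      ...
 *      chain_free(&ca, PG_UNSAFE_CLEAR);  releases every object at once
 */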
/**
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent the blocks of bit chunks in which information is
 * stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * pfns that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bit chunks
 * of type unsigned long each).  It also contains the pfns that
 * correspond to the start and end of the represented memory area and
 * the number of bit chunks in the block.
 *
 * NOTE: Memory bitmaps are used for two types of operations only:
 * "set a bit" and "find the next bit set".  Moreover, the searching
 * is always carried out after all of the "set a bit" operations
 * on a given bitmap.
 */
#define BM_END_OF_MAP   (~0UL)

#define BM_CHUNKS_PER_BLOCK     (PAGE_SIZE / sizeof(long))
#define BM_BITS_PER_CHUNK       (sizeof(long) << 3)
#define BM_BITS_PER_BLOCK       (PAGE_SIZE << 3)
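
/* With 4 KiB pages and 64-bit longs, for instance, these evaluate to
 * BM_CHUNKS_PER_BLOCK = 4096 / 8 = 512 chunks per block,
 * BM_BITS_PER_CHUNK = 8 * 8 = 64 bits per chunk, and
 * BM_BITS_PER_BLOCK = 4096 * 8 = 32768 pfns covered by one block.
 */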
struct bm_block {
        struct bm_block *next;          /* next element of the list */
        unsigned long start_pfn;        /* pfn represented by the first bit */
        unsigned long end_pfn;  /* pfn represented by the last bit plus 1 */
        unsigned int size;      /* number of bit chunks */
        unsigned long *data;    /* chunks of bits representing pages */
};
struct zone_bitmap {
        struct zone_bitmap *next;       /* next element of the list */
        unsigned long start_pfn;        /* minimal pfn in this zone */
        unsigned long end_pfn;          /* maximal pfn in this zone plus 1 */
        struct bm_block *bm_blocks;     /* list of bitmap blocks */
        struct bm_block *cur_block;     /* recently used bitmap block */
};
/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
        struct zone_bitmap *zone_bm;
        struct bm_block *block;
        int chunk;
        int bit;
};
struct memory_bitmap {
        struct zone_bitmap *zone_bm_list;       /* list of zone bitmaps */
        struct linked_page *p_list;     /* list of pages used to store zone
                                         * bitmap objects and bitmap block
                                         * objects
                                         */
        struct bm_position cur; /* most recently used bit position */
};
/* Functions that operate on memory bitmaps */

static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
{
        bm->cur.chunk = 0;
        bm->cur.bit = -1;
}
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
        struct zone_bitmap *zone_bm;

        zone_bm = bm->zone_bm_list;
        bm->cur.zone_bm = zone_bm;
        bm->cur.block = zone_bm->bm_blocks;
        memory_bm_reset_chunk(bm);
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
/**
 * create_bm_block_list - create a list of block bitmap objects
 */

static inline struct bm_block *
create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
{
        struct bm_block *bblist = NULL;

        while (nr_blocks-- > 0) {
                struct bm_block *bb;

                bb = chain_alloc(ca, sizeof(struct bm_block));
                if (!bb)
                        return NULL;

                bb->next = bblist;
                bblist = bb;
        }
        return bblist;
}
/**
 * create_zone_bm_list - create a list of zone bitmap objects
 */

static inline struct zone_bitmap *
create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
{
        struct zone_bitmap *zbmlist = NULL;

        while (nr_zones-- > 0) {
                struct zone_bitmap *zbm;

                zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
                if (!zbm)
                        return NULL;

                zbm->next = zbmlist;
                zbmlist = zbm;
        }
        return zbmlist;
}
/**
 * memory_bm_create - allocate memory for a memory bitmap
 */

static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
        struct chain_allocator ca;
        struct zone *zone;
        struct zone_bitmap *zone_bm;
        struct bm_block *bb;
        unsigned int nr;

        chain_init(&ca, gfp_mask, safe_needed);

        /* Compute the number of zones */
        nr = 0;
        for_each_zone(zone)
                if (populated_zone(zone))
                        nr++;

        /* Allocate the list of zones bitmap objects */
        zone_bm = create_zone_bm_list(nr, &ca);
        bm->zone_bm_list = zone_bm;
        if (!zone_bm) {
                chain_free(&ca, PG_UNSAFE_CLEAR);
                return -ENOMEM;
        }

        /* Initialize the zone bitmap objects */
        for_each_zone(zone) {
                unsigned long pfn;

                if (!populated_zone(zone))
                        continue;

                zone_bm->start_pfn = zone->zone_start_pfn;
                zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
                /* Allocate the list of bitmap block objects */
                nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
                bb = create_bm_block_list(nr, &ca);
                zone_bm->bm_blocks = bb;
                zone_bm->cur_block = bb;
                if (!bb)
                        goto Free;

                nr = zone->spanned_pages;
                pfn = zone->zone_start_pfn;
                /* Initialize the bitmap block objects */
                while (bb) {
                        unsigned long *ptr;

                        ptr = get_image_page(gfp_mask, safe_needed);
                        bb->data = ptr;
                        if (!ptr)
                                goto Free;

                        bb->start_pfn = pfn;
                        if (nr >= BM_BITS_PER_BLOCK) {
                                pfn += BM_BITS_PER_BLOCK;
                                bb->size = BM_CHUNKS_PER_BLOCK;
                                nr -= BM_BITS_PER_BLOCK;
                        } else {
                                /* This is executed only once in the loop */
                                pfn += nr;
                                bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
                        }
                        bb->end_pfn = pfn;
                        bb = bb->next;
                }
                zone_bm = zone_bm->next;
        }
        bm->p_list = ca.chain;
        memory_bm_position_reset(bm);
        return 0;

 Free:
        bm->p_list = ca.chain;
        memory_bm_free(bm, PG_UNSAFE_CLEAR);
        return -ENOMEM;
}
/**
 * memory_bm_free - free memory occupied by the memory bitmap @bm
 */

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
        struct zone_bitmap *zone_bm;

        /* Free the list of bit blocks for each zone_bitmap object */
        zone_bm = bm->zone_bm_list;
        while (zone_bm) {
                struct bm_block *bb;

                bb = zone_bm->bm_blocks;
                while (bb) {
                        if (bb->data)
                                free_image_page(bb->data, clear_nosave_free);
                        bb = bb->next;
                }
                zone_bm = zone_bm->next;
        }
        free_list_of_pages(bm->p_list, clear_nosave_free);
        bm->zone_bm_list = NULL;
}
/**
 * memory_bm_set_bit - set the bit in the bitmap @bm that corresponds
 * to the given pfn.  The cur.zone_bm member of @bm and the cur_block
 * member of the current zone bitmap are updated.
 *
 * If the bit cannot be set, the function returns -EINVAL.
 */

static int
memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        struct zone_bitmap *zone_bm;
        struct bm_block *bb;

        /* Check if the pfn is from the current zone */
        zone_bm = bm->cur.zone_bm;
        if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
                zone_bm = bm->zone_bm_list;
                /* We don't assume that the zones are sorted by pfns */
                while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
                        zone_bm = zone_bm->next;

                        if (unlikely(!zone_bm))
                                return -EINVAL;
                }
                bm->cur.zone_bm = zone_bm;
        }
        /* Check if the pfn corresponds to the current bitmap block */
        bb = zone_bm->cur_block;
        if (pfn < bb->start_pfn)
                bb = zone_bm->bm_blocks;

        while (pfn >= bb->end_pfn) {
                bb = bb->next;

                BUG_ON(!bb);
        }
        zone_bm->cur_block = bb;
        pfn -= bb->start_pfn;
        set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK);
        return 0;
}
/* Two auxiliary functions for memory_bm_next_pfn */

/* Find the first set bit in the given chunk, if there is one */

static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
{
        bit++;
        while (bit < BM_BITS_PER_CHUNK) {
                if (test_bit(bit, chunk_p))
                        return bit;

                bit++;
        }
        return -1;
}

/* Find a chunk containing some bits set in given block of bits */

static inline int next_chunk_in_block(int n, struct bm_block *bb)
{
        n++;
        while (n < bb->size) {
                if (bb->data[n])
                        return n;

                n++;
        }
        return -1;
}
/**
 * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 * in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 * returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function.
 */

static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
        struct zone_bitmap *zone_bm;
        struct bm_block *bb;
        int chunk;
        int bit;

        do {
                bb = bm->cur.block;
                do {
                        chunk = bm->cur.chunk;
                        bit = bm->cur.bit;
                        do {
                                bit = next_bit_in_chunk(bit, bb->data + chunk);
                                if (bit >= 0)
                                        goto Return_pfn;

                                chunk = next_chunk_in_block(chunk, bb);
                                bit = -1;
                        } while (chunk >= 0);
                        bb = bb->next;
                        bm->cur.block = bb;
                        memory_bm_reset_chunk(bm);
                } while (bb);
                zone_bm = bm->cur.zone_bm->next;
                if (zone_bm) {
                        bm->cur.zone_bm = zone_bm;
                        bm->cur.block = zone_bm->bm_blocks;
                        memory_bm_reset_chunk(bm);
                }
        } while (zone_bm);
        memory_bm_position_reset(bm);
        return BM_END_OF_MAP;

 Return_pfn:
        bm->cur.chunk = chunk;
        bm->cur.bit = bit;
        return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
}
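
/* The two operations are combined according to a fixed pattern: set all of
 * the bits first, then reset the position and iterate, e.g. (schematically,
 * with handle_pfn() standing in for arbitrary per-page work):
 *
 *      memory_bm_position_reset(bm);
 *      for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *                      pfn = memory_bm_next_pfn(bm))
 *              handle_pfn(pfn);
 *
 * This is the pattern used by copy_data_pages() and
 * duplicate_memory_bitmap() below.
 */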
/**
 * snapshot_additional_pages - estimate the number of additional pages
 * needed for setting up the suspend image data structures for the given
 * zone (usually the returned value is greater than the exact number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
        unsigned int res;

        res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
        res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
        return 2 * res;
}
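
/* A worked example, assuming 4 KiB pages, 64-bit longs and a 40-byte
 * struct bm_block: a zone spanning 262144 pfns (1 GiB of memory) needs
 * DIV_ROUND_UP(262144, 32768) = 8 bitmap pages plus
 * DIV_ROUND_UP(8 * 40, 4096) = 1 page for the bm_block objects, i.e.
 * 9 pages per bitmap, doubled for the two bitmaps created on suspend.
 */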
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - compute the total number of free highmem
 * pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
        struct zone *zone;
        unsigned int cnt = 0;

        for_each_zone(zone)
                if (populated_zone(zone) && is_highmem(zone))
                        cnt += zone_page_state(zone, NR_FREE_PAGES);

        return cnt;
}
/**
 * saveable_highmem_page - Determine whether a highmem page should be
 * included in the suspend image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't a part of a free chunk of pages.
 */

static struct page *saveable_highmem_page(unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);

        BUG_ON(!PageHighMem(page));

        if (PageNosave(page) || PageReserved(page) || PageNosaveFree(page))
                return NULL;

        return page;
}
/**
 * count_highmem_pages - compute the total number of saveable highmem
 * pages.
 */

unsigned int count_highmem_pages(void)
{
        struct zone *zone;
        unsigned int n = 0;

        for_each_zone(zone) {
                unsigned long pfn, max_zone_pfn;

                if (!is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_highmem_page(pfn))
                                n++;
        }
        return n;
}
#else
static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
static inline unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

static inline int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
        unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

        return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
/**
 * saveable_page - Determine whether a non-highmem page should be included
 * in the suspend image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't a part of
 * a free chunk of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);

        BUG_ON(PageHighMem(page));

        if (PageNosave(page) || PageNosaveFree(page))
                return NULL;

        if (PageReserved(page) && pfn_is_nosave(pfn))
                return NULL;

        return page;
}
/**
 * count_data_pages - compute the total number of saveable non-highmem
 * pages.
 */

unsigned int count_data_pages(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        unsigned int n = 0;

        for_each_zone(zone) {
                if (is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_page(pfn))
                                n++;
        }
        return n;
}
/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
        int n;

        for (n = PAGE_SIZE / sizeof(long); n; n--)
                *dst++ = *src++;
}
#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
        return is_highmem(zone) ?
                        saveable_highmem_page(pfn) : saveable_page(pfn);
}

static inline void
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        struct page *s_page, *d_page;
        void *src, *dst;

        s_page = pfn_to_page(src_pfn);
        d_page = pfn_to_page(dst_pfn);
        if (PageHighMem(s_page)) {
                src = kmap_atomic(s_page, KM_USER0);
                dst = kmap_atomic(d_page, KM_USER1);
                do_copy_page(dst, src);
                kunmap_atomic(src, KM_USER0);
                kunmap_atomic(dst, KM_USER1);
        } else {
                src = page_address(s_page);
                if (PageHighMem(d_page)) {
                        /* The page pointed to by src may contain some kernel
                         * data modified by kmap_atomic()
                         */
                        do_copy_page(buffer, src);
                        dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
                        memcpy(dst, buffer, PAGE_SIZE);
                        kunmap_atomic(dst, KM_USER0);
                } else {
                        dst = page_address(d_page);
                        do_copy_page(dst, src);
                }
        }
}
#else
#define page_is_saveable(zone, pfn)     saveable_page(pfn)

static inline void
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        do_copy_page(page_address(pfn_to_page(dst_pfn)),
                        page_address(pfn_to_page(src_pfn)));
}
#endif /* CONFIG_HIGHMEM */
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
        struct zone *zone;
        unsigned long pfn;

        for_each_zone(zone) {
                unsigned long max_zone_pfn;

                mark_free_pages(zone);
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (page_is_saveable(zone, pfn))
                                memory_bm_set_bit(orig_bm, pfn);
        }
        memory_bm_position_reset(orig_bm);
        memory_bm_position_reset(copy_bm);
        do {
                pfn = memory_bm_next_pfn(orig_bm);
                if (likely(pfn != BM_END_OF_MAP))
                        copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
        } while (pfn != BM_END_OF_MAP);
}
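
/* Note the lockstep above: @orig_bm and @copy_bm carry the same number of
 * set bits, so advancing both with memory_bm_next_pfn() pairs each saveable
 * page with the image page that was allocated for its copy.
 */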
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/**
 * swsusp_free - free pages allocated for the suspend.
 *
 * Suspend pages are allocated before the atomic copy is made, so we
 * need to release them after the resume.
 */

void swsusp_free(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;

        for_each_zone(zone) {
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);

                                if (PageNosave(page) && PageNosaveFree(page)) {
                                        ClearPageNosave(page);
                                        ClearPageNosaveFree(page);
                                        free_page((unsigned long)page_address(page));
                                }
                        }
        }
        nr_copy_pages = 0;
        nr_meta_pages = 0;
        restore_pblist = NULL;
        buffer = NULL;
}
#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - compute the number of non-highmem pages
 * that will be necessary for creating copies of highmem pages.
 */

static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
        unsigned int free_highmem = count_free_highmem_pages();

        if (free_highmem >= nr_highmem)
                nr_highmem = 0;
        else
                nr_highmem -= free_highmem;

        return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * enough_free_mem - Make sure we have enough free memory for the
 * snapshot image.
 */

static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
        struct zone *zone;
        unsigned int free = 0, meta = 0;

        for_each_zone(zone) {
                meta += snapshot_additional_pages(zone);
                if (!is_highmem(zone))
                        free += zone_page_state(zone, NR_FREE_PAGES);
        }

        nr_pages += count_pages_for_highmem(nr_highmem);
        pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
                nr_pages, PAGES_FOR_IO, meta, free);

        return free > nr_pages + PAGES_FOR_IO + meta;
}
#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - if there are some highmem pages in the suspend
 * image, we may need the buffer to copy them and/or load their data.
 */

static inline int get_highmem_buffer(int safe_needed)
{
        buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
        return buffer ? 0 : -ENOMEM;
}
/**
 * alloc_highmem_image_pages - allocate some highmem pages for the image.
 * Try to allocate as many pages as needed, but if the number of free
 * highmem pages is less than that, allocate them all.
 */

static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
        unsigned int to_alloc = count_free_highmem_pages();

        if (to_alloc > nr_highmem)
                to_alloc = nr_highmem;

        nr_highmem -= to_alloc;
        while (to_alloc-- > 0) {
                struct page *page;

                page = alloc_image_page(__GFP_HIGHMEM);
                memory_bm_set_bit(bm, page_to_pfn(page));
        }
        return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * swsusp_alloc - allocate memory for the suspend image
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */

static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
                unsigned int nr_pages, unsigned int nr_highmem)
{
        int error;

        error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
        if (error)
                goto Free;

        error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
        if (error)
                goto Free;

        if (nr_highmem > 0) {
                error = get_highmem_buffer(PG_ANY);
                if (error)
                        goto Free;

                nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
        }
        while (nr_pages-- > 0) {
                struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);

                if (!page)
                        goto Free;

                memory_bm_set_bit(copy_bm, page_to_pfn(page));
        }
        return 0;

 Free:
        swsusp_free();
        return -ENOMEM;
}
/* Memory bitmap used for marking saveable pages (during suspend) or the
 * suspend image pages (during resume)
 */
static struct memory_bitmap orig_bm;
/* Memory bitmap used on suspend for marking allocated pages that will contain
 * the copies of saveable pages.  During resume it is initially used for
 * marking the suspend image pages, but then its set bits are duplicated in
 * @orig_bm and it is released.  Next, on systems with high memory, it may be
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

asmlinkage int swsusp_save(void)
{
        unsigned int nr_pages, nr_highmem;

        printk("swsusp: critical section: \n");

        drain_local_pages();
        nr_pages = count_data_pages();
        nr_highmem = count_highmem_pages();
        printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);

        if (!enough_free_mem(nr_pages, nr_highmem)) {
                printk(KERN_ERR "swsusp: Not enough free memory\n");
                return -ENOMEM;
        }

        if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
                printk(KERN_ERR "swsusp: Memory allocation failed\n");
                return -ENOMEM;
        }

        /* During the allocation of the suspend pagedir, new cold pages may
         * appear.  Kill them.
         */
        drain_local_pages();
        copy_data_pages(&copy_bm, &orig_bm);

        /*
         * End of critical section.  From now on, we can write to memory,
         * but we should not touch disk.  This specially means we must _not_
         * touch swap space!  Except we must write out our image of course.
         */

        nr_pages += nr_highmem;
        nr_copy_pages = nr_pages;
        nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

        printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);

        return 0;
}
static void init_header(struct swsusp_info *info)
{
        memset(info, 0, sizeof(struct swsusp_info));
        info->version_code = LINUX_VERSION_CODE;
        info->num_physpages = num_physpages;
        memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
        info->cpus = num_online_cpus();
        info->image_pages = nr_copy_pages;
        info->pages = nr_copy_pages + nr_meta_pages + 1;
        info->size = info->pages;
        info->size <<= PAGE_SHIFT;
}
/**
 * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 * are stored in the array @buf[] (1 page at a time)
 */

static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
        int j;

        for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
                buf[j] = memory_bm_next_pfn(bm);
                if (unlikely(buf[j] == BM_END_OF_MAP))
                        break;
        }
}
/**
 * snapshot_read_next - used for reading the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure.  The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * The @count parameter should contain the number of bytes the caller
 * wants to read from the snapshot.  It must not be zero.
 *
 * On success the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.  The number returned
 * may be smaller than @count, but this only happens if the read would
 * cross a page boundary otherwise.
 *
 * The function returns 0 to indicate the end of data stream condition,
 * and a negative number is returned on error.  In such cases the
 * structure pointed to by @handle is not updated and should not be used
 * any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
        if (handle->cur > nr_meta_pages + nr_copy_pages)
                return 0;

        if (!buffer) {
                /* This makes the buffer be freed by swsusp_free() */
                buffer = get_image_page(GFP_ATOMIC, PG_ANY);
                if (!buffer)
                        return -ENOMEM;
        }
        if (!handle->offset) {
                init_header((struct swsusp_info *)buffer);
                handle->buffer = buffer;
                memory_bm_position_reset(&orig_bm);
                memory_bm_position_reset(&copy_bm);
        }
        if (handle->prev < handle->cur) {
                if (handle->cur <= nr_meta_pages) {
                        memset(buffer, 0, PAGE_SIZE);
                        pack_pfns(buffer, &orig_bm);
                } else {
                        struct page *page;

                        page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
                        if (PageHighMem(page)) {
                                /* Highmem pages are copied to the buffer,
                                 * because we can't return with a kmapped
                                 * highmem page (we may not be called again).
                                 */
                                void *kaddr;

                                kaddr = kmap_atomic(page, KM_USER0);
                                memcpy(buffer, kaddr, PAGE_SIZE);
                                kunmap_atomic(kaddr, KM_USER0);
                                handle->buffer = buffer;
                        } else {
                                handle->buffer = page_address(page);
                        }
                }
                handle->prev = handle->cur;
        }
        handle->buf_offset = handle->cur_offset;
        if (handle->cur_offset + count >= PAGE_SIZE) {
                count = PAGE_SIZE - handle->cur_offset;
                handle->cur_offset = 0;
                handle->cur++;
        } else {
                handle->cur_offset += count;
        }
        handle->offset += count;
        return count;
}
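
/* A sketch of the caller side (write_page() here is a hypothetical output
 * routine; the real consumer of this interface is the swap writing code):
 *
 *      struct snapshot_handle handle;
 *      int n;
 *
 *      memset(&handle, 0, sizeof(handle));
 *      while ((n = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *              write_page(data_of(handle), n);
 */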
/**
 * mark_unsafe_pages - mark the pages that cannot be used for storing
 * the image during resume, because they conflict with the pages that
 * had been used before suspend
 */

static int mark_unsafe_pages(struct memory_bitmap *bm)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;

        /* Clear page flags */
        for_each_zone(zone) {
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (pfn_valid(pfn))
                                ClearPageNosaveFree(pfn_to_page(pfn));
        }

        /* Mark pages that correspond to the "original" pfns as "unsafe" */
        memory_bm_position_reset(bm);
        do {
                pfn = memory_bm_next_pfn(bm);
                if (likely(pfn != BM_END_OF_MAP)) {
                        if (likely(pfn_valid(pfn)))
                                SetPageNosaveFree(pfn_to_page(pfn));
                        else
                                return -EFAULT;
                }
        } while (pfn != BM_END_OF_MAP);

        allocated_unsafe_pages = 0;

        return 0;
}
static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
        unsigned long pfn;

        memory_bm_position_reset(src);
        pfn = memory_bm_next_pfn(src);
        while (pfn != BM_END_OF_MAP) {
                memory_bm_set_bit(dst, pfn);
                pfn = memory_bm_next_pfn(src);
        }
}
static inline int check_header(struct swsusp_info *info)
{
        char *reason = NULL;

        if (info->version_code != LINUX_VERSION_CODE)
                reason = "kernel version";
        if (info->num_physpages != num_physpages)
                reason = "memory size";
        if (strcmp(info->uts.sysname, init_utsname()->sysname))
                reason = "system type";
        if (strcmp(info->uts.release, init_utsname()->release))
                reason = "kernel release";
        if (strcmp(info->uts.version, init_utsname()->version))
                reason = "version";
        if (strcmp(info->uts.machine, init_utsname()->machine))
                reason = "machine";
        if (reason) {
                printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
                return -EPERM;
        }
        return 0;
}
/**
 * load_header - check the image header and copy data from it
 */

static int load_header(struct swsusp_info *info)
{
        int error;

        restore_pblist = NULL;
        error = check_header(info);
        if (!error) {
                nr_copy_pages = info->image_pages;
                nr_meta_pages = info->pages - info->image_pages - 1;
        }
        return error;
}
/**
 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
 * the corresponding bit in the memory bitmap @bm
 */

static inline void
unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
        int j;

        for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
                if (unlikely(buf[j] == BM_END_OF_MAP))
                        break;

                memory_bm_set_bit(bm, buf[j]);
        }
}
/* List of "safe" pages that may be used to store data loaded from the suspend
 * image
 */
static struct linked_page *safe_pages_list;

#ifdef CONFIG_HIGHMEM
/* struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
        struct page *copy_page; /* data is here now */
        struct page *orig_page; /* data was here before the suspend */
        struct highmem_pbe *next;
};

/* List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
/**
 * count_highmem_image_pages - compute the number of highmem pages in the
 * suspend image.  The bits in the memory bitmap @bm that correspond to the
 * image pages are assumed to be set.
 */

static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
        unsigned long pfn;
        unsigned int cnt = 0;

        memory_bm_position_reset(bm);
        pfn = memory_bm_next_pfn(bm);
        while (pfn != BM_END_OF_MAP) {
                if (PageHighMem(pfn_to_page(pfn)))
                        cnt++;

                pfn = memory_bm_next_pfn(bm);
        }
        return cnt;
}
/**
 * prepare_highmem_image - try to allocate as many highmem pages as
 * there are highmem image pages (@nr_highmem_p points to the variable
 * containing the number of highmem image pages).  The pages that are
 * "safe" (ie. will not be overwritten when the suspend image is
 * restored) have the corresponding bits set in @bm (it must be
 * uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem
 * image pages.
 */

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
        unsigned int to_alloc;

        if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
                return -ENOMEM;

        if (get_highmem_buffer(PG_SAFE))
                return -ENOMEM;

        to_alloc = count_free_highmem_pages();
        if (to_alloc > *nr_highmem_p)
                to_alloc = *nr_highmem_p;
        else
                *nr_highmem_p = to_alloc;

        safe_highmem_pages = 0;
        while (to_alloc-- > 0) {
                struct page *page;

                page = alloc_page(__GFP_HIGHMEM);
                if (!PageNosaveFree(page)) {
                        /* The page is "safe", set its bit in the bitmap */
                        memory_bm_set_bit(bm, page_to_pfn(page));
                        safe_highmem_pages++;
                }
                /* Mark the page as allocated */
                SetPageNosave(page);
                SetPageNosaveFree(page);
        }
        memory_bm_position_reset(bm);
        safe_highmem_bm = bm;
        return 0;
}
/**
 * get_highmem_page_buffer - for the given highmem image page find the
 * buffer that suspend_write_next() should set for its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */

static struct page *last_highmem_page;

static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
        struct highmem_pbe *pbe;
        void *kaddr;

        if (PageNosave(page) && PageNosaveFree(page)) {
                /* We have allocated the "original" page frame and we can
                 * use it directly to store the loaded page.
                 */
                last_highmem_page = page;
                return buffer;
        }
        /* The "original" page frame has not been allocated and we have to
         * use a "safe" page frame to store the loaded page.
         */
        pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
        if (!pbe) {
                swsusp_free();
                return NULL;
        }
        pbe->orig_page = page;
        if (safe_highmem_pages > 0) {
                struct page *tmp;

                /* Copy of the page will be stored in high memory */
                kaddr = buffer;
                tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
                safe_highmem_pages--;
                last_highmem_page = tmp;
                pbe->copy_page = tmp;
        } else {
                /* Copy of the page will be stored in normal memory */
                kaddr = safe_pages_list;
                safe_pages_list = safe_pages_list->next;
                pbe->copy_page = virt_to_page(kaddr);
        }
        pbe->next = highmem_pblist;
        highmem_pblist = pbe;
        return kaddr;
}
/**
 * copy_last_highmem_page - copy the contents of a highmem image from
 * @buffer, where the caller of snapshot_write_next() has placed them,
 * to the right location represented by @last_highmem_page .
 */

static void copy_last_highmem_page(void)
{
        if (last_highmem_page) {
                void *dst;

                dst = kmap_atomic(last_highmem_page, KM_USER0);
                memcpy(dst, buffer, PAGE_SIZE);
                kunmap_atomic(dst, KM_USER0);
                last_highmem_page = NULL;
        }
}
static inline int last_highmem_page_copied(void)
{
        return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
        if (safe_highmem_bm)
                memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

        if (buffer)
                free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
        return 0;
}

static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
        return NULL;
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
/**
 * prepare_image - use the memory bitmap @bm to mark the pages that will
 * be overwritten in the process of restoring the system memory state
 * from the suspend image ("unsafe" pages) and allocate memory for the
 * image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for the image data, but not to assign these
 * pages to specific tasks initially.  Instead, we just mark them as
 * allocated and create a list of "safe" pages that will be used
 * later.  On systems with high memory a list of "safe" highmem pages is
 * also created.
 */

#define PBES_PER_LINKED_PAGE    (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
        unsigned int nr_pages, nr_highmem;
        struct linked_page *sp_list, *lp;
        int error;

        /* If there is no highmem, the buffer will not be necessary */
        free_image_page(buffer, PG_UNSAFE_CLEAR);
        buffer = NULL;

        nr_highmem = count_highmem_image_pages(bm);
        error = mark_unsafe_pages(bm);
        if (error)
                goto Free;

        error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
        if (error)
                goto Free;

        duplicate_memory_bitmap(new_bm, bm);
        memory_bm_free(bm, PG_UNSAFE_KEEP);
        if (nr_highmem > 0) {
                error = prepare_highmem_image(bm, &nr_highmem);
                if (error)
                        goto Free;
        }
        /* Reserve some safe pages for potential later use.
         *
         * NOTE: This way we make sure there will be enough safe pages for the
         * chain_alloc() in get_buffer().  It is a bit wasteful, but
         * nr_copy_pages cannot be greater than 50% of the memory anyway.
         */
        sp_list = NULL;
        /* nr_copy_pages cannot be less than allocated_unsafe_pages */
        nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
        nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
        while (nr_pages > 0) {
                lp = get_image_page(GFP_ATOMIC, PG_SAFE);
                if (!lp) {
                        error = -ENOMEM;
                        goto Free;
                }
                lp->next = sp_list;
                sp_list = lp;
                nr_pages--;
        }
        /* Preallocate memory for the image */
        safe_pages_list = NULL;
        nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
        while (nr_pages > 0) {
                lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
                if (!lp) {
                        error = -ENOMEM;
                        goto Free;
                }
                if (!PageNosaveFree(virt_to_page(lp))) {
                        /* The page is "safe", add it to the list */
                        lp->next = safe_pages_list;
                        safe_pages_list = lp;
                }
                /* Mark the page as allocated */
                SetPageNosave(virt_to_page(lp));
                SetPageNosaveFree(virt_to_page(lp));
                nr_pages--;
        }
        /* Free the reserved safe pages so that chain_alloc() can use them */
        while (sp_list) {
                lp = sp_list->next;
                free_image_page(sp_list, PG_UNSAFE_CLEAR);
                sp_list = lp;
        }
        return 0;

 Free:
        swsusp_free();
        return error;
}
/**
 * get_buffer - compute the address that snapshot_write_next() should
 * set for its caller to write to.
 */

static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
        struct pbe *pbe;
        struct page *page = pfn_to_page(memory_bm_next_pfn(bm));

        if (PageHighMem(page))
                return get_highmem_page_buffer(page, ca);

        if (PageNosave(page) && PageNosaveFree(page))
                /* We have allocated the "original" page frame and we can
                 * use it directly to store the loaded page.
                 */
                return page_address(page);

        /* The "original" page frame has not been allocated and we have to
         * use a "safe" page frame to store the loaded page.
         */
        pbe = chain_alloc(ca, sizeof(struct pbe));
        if (!pbe) {
                swsusp_free();
                return NULL;
        }
        pbe->orig_address = page_address(page);
        pbe->address = safe_pages_list;
        safe_pages_list = safe_pages_list->next;
        pbe->next = restore_pblist;
        restore_pblist = pbe;
        return pbe->address;
}
/**
 * snapshot_write_next - used for writing the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure.  The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * The @count parameter should contain the number of bytes the caller
 * wants to write to the image.  It must not be zero.
 *
 * On success the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.  The number returned
 * may be smaller than @count, but this only happens if the write would
 * cross a page boundary otherwise.
 *
 * The function returns 0 to indicate the "end of file" condition,
 * and a negative number is returned on error.  In such cases the
 * structure pointed to by @handle is not updated and should not be used
 * any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
        static struct chain_allocator ca;
        int error = 0;

        /* Check if we have already loaded the entire image */
        if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
                return 0;

        if (handle->offset == 0) {
                if (!buffer)
                        /* This makes the buffer be freed by swsusp_free() */
                        buffer = get_image_page(GFP_ATOMIC, PG_ANY);

                if (!buffer)
                        return -ENOMEM;

                handle->buffer = buffer;
        }
        handle->sync_read = 1;
        if (handle->prev < handle->cur) {
                if (handle->prev == 0) {
                        error = load_header(buffer);
                        if (error)
                                return error;

                        error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
                        if (error)
                                return error;

                } else if (handle->prev <= nr_meta_pages) {
                        unpack_orig_pfns(buffer, &copy_bm);
                        if (handle->prev == nr_meta_pages) {
                                error = prepare_image(&orig_bm, &copy_bm);
                                if (error)
                                        return error;

                                chain_init(&ca, GFP_ATOMIC, PG_SAFE);
                                memory_bm_position_reset(&orig_bm);
                                restore_pblist = NULL;
                                handle->buffer = get_buffer(&orig_bm, &ca);
                                handle->sync_read = 0;
                                if (!handle->buffer)
                                        return -ENOMEM;
                        }
                } else {
                        copy_last_highmem_page();
                        handle->buffer = get_buffer(&orig_bm, &ca);
                        if (handle->buffer != buffer)
                                handle->sync_read = 0;
                }
                handle->prev = handle->cur;
        }
        handle->buf_offset = handle->cur_offset;
        if (handle->cur_offset + count >= PAGE_SIZE) {
                count = PAGE_SIZE - handle->cur_offset;
                handle->cur_offset = 0;
                handle->cur++;
        } else {
                handle->cur_offset += count;
        }
        handle->offset += count;
        return count;
}
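
/* The caller side mirrors the read side (read_page() here is a hypothetical
 * input routine; the real producer of the data is the swap reading code):
 *
 *      struct snapshot_handle handle;
 *      int n;
 *
 *      memset(&handle, 0, sizeof(handle));
 *      while ((n = snapshot_write_next(&handle, PAGE_SIZE)) > 0)
 *              read_page(data_of(handle), n);
 *      snapshot_write_finalize(&handle);
 */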
/**
 * snapshot_write_finalize - must be called after the last call to
 * snapshot_write_next() in case the last page in the image happens
 * to be a highmem page and its contents should be stored in the
 * highmem.  Additionally, it releases the memory that will not be
 * used any more.
 */

void snapshot_write_finalize(struct snapshot_handle *handle)
{
        copy_last_highmem_page();
        /* Free only if we have loaded the image entirely */
        if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
                memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
                free_highmem_data();
        }
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
        return !(!nr_copy_pages || !last_highmem_page_copied() ||
                        handle->cur <= nr_meta_pages + nr_copy_pages);
}
#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
        void *kaddr1, *kaddr2;

        kaddr1 = kmap_atomic(p1, KM_USER0);
        kaddr2 = kmap_atomic(p2, KM_USER1);
        memcpy(buf, kaddr1, PAGE_SIZE);
        memcpy(kaddr1, kaddr2, PAGE_SIZE);
        memcpy(kaddr2, buf, PAGE_SIZE);
        kunmap_atomic(kaddr1, KM_USER0);
        kunmap_atomic(kaddr2, KM_USER1);
}
/**
 * restore_highmem - for each highmem page that was allocated before
 * the suspend and included in the suspend image, and also has been
 * allocated by the "resume" kernel, swap its current (ie. "before
 * resume") contents with the previous (ie. "before suspend") ones.
 *
 * If the resume eventually fails, we can call this function once
 * again and restore the "before resume" highmem state.
 */

int restore_highmem(void)
{
        struct highmem_pbe *pbe = highmem_pblist;
        void *buf;

        if (!pbe)
                return 0;

        buf = get_image_page(GFP_ATOMIC, PG_SAFE);
        if (!buf)
                return -ENOMEM;

        while (pbe) {
                swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
                pbe = pbe->next;
        }
        free_image_page(buf, PG_UNSAFE_CLEAR);
        return 0;
}
#endif /* CONFIG_HIGHMEM */