/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "power.h"
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}
/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;
/**
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend.  The unsafe pages have PageNosaveFree set
 * and we count them using unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree
 * so that swsusp_free() can release it.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}
static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}
/**
 * free_image_page - free page represented by @addr, allocated with
 * get_image_page (page flags set by it must be cleared)
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}
/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
};

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
/**
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
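/*
 * Illustrative sketch (not part of the original file): a typical use of
 * the chain allocator, following the pattern of alloc_rtree_node() below:
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *
 * Objects are never freed individually; the whole chain is released in
 * one go by passing ca.chain to free_list_of_pages().
 */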
/**
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent the blocks of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * pfns that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */
#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
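/*
 * Worked example (illustrative): with 4 KiB pages, BM_BITS_PER_BLOCK is
 * 4096 * 8 = 32768, so one bitmap page covers 32768 page frames, and
 * BM_BLOCK_SHIFT is 12 + 3 = 15; pfn >> BM_BLOCK_SHIFT selects the block
 * while pfn & BM_BLOCK_MASK selects the bit within it.
 */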
/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};
/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};
/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};
struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};
/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
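/*
 * Worked example (illustrative): on a 64-bit machine with 4 KiB pages,
 * BM_ENTRIES_PER_LEVEL is 4096 / 8 = 512 and BM_RTREE_LEVEL_SHIFT is
 * 12 - 3 = 9, so each radix-tree level consumes 9 bits of the block
 * number, and a single level is enough for up to 512 leaf blocks
 * (512 * 32768 page frames, i.e. 64 GiB of memory).
 */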
/*
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}
/*
 * add_rtree_block - Add a new leaf node to the radix tree
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order.  This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);
/*
 * create_zone_bm_rtree - create a radix tree for one zone
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *
create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
		     struct chain_allocator *ca,
		     unsigned long start, unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}
/*
 * free_zone_bm_rtree - Free the memory of the radix tree
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};
/**
 * free_mem_extents - free a list of memory extents
 * @list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}
/**
 * create_mem_extents - create a list of memory extents representing
 *                      contiguous ranges of PFNs
 * @list - list to put the extents into
 * @gfp_mask - mask to use for memory allocations
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
/**
 * memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}
/**
 * memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}
/*
 * memory_bm_find_bit - Find the bit for pfn in the memory
 *                      bitmap
 *
 * Find the bit in the bitmap @bm that corresponds to given pfn.
 * The cur.zone, cur.block and cur.node_pfn members of @bm are
 * updated.
 * It walks the radix tree to find the page which contains the bit for
 * pfn and returns the bit position in **addr and *bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have a zone. Now walk the radix tree to find the leaf node
	 * for our pfn.
	 */

	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);
	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}
/*
 * rtree_next_node - Jumps to the next leaf node
 *
 * Sets the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Returns true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	bm->cur.node = list_entry(bm->cur.node->list.next,
				  struct rtree_node, list);
	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	bm->cur.zone = list_entry(bm->cur.zone->list.next,
				  struct mem_zone_bm_rtree, list);
	if (&bm->cur.zone->list != &bm->zones) {
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}
/*
 * memory_bm_rtree_next_pfn - Find the next set bit in the bitmap @bm
 *
 * Starting from the last returned position this function searches
 * for the next set bit in the memory bitmap and returns its
 * number.  If no more bit is set BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the
 * first call to this function.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
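/*
 * Illustrative sketch (not part of the original file): the canonical way
 * to walk all set bits in a bitmap, as used e.g. by
 * duplicate_memory_bitmap() further below:
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		... use pfn ...
 */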
/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during the suspend.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);
/**
 * register_nosave_region - register a range of page frames the contents
 * of which should not be saved during the suspend (to be used in the early
 * initialization code)
 */
void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
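/*
 * Illustrative note: early platform code does not usually call the
 * function above directly but goes through the register_nosave_region()
 * and register_nosave_region_late() wrappers from <linux/suspend.h>,
 * which pick the memblock or kmalloc allocation path seen above.
 */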
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;
/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}
/**
 * mark_nosave_pages - set bits corresponding to the page frames the
 * contents of which should not be saved in a given bitmap.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}
/**
 * create_basic_memory_bitmaps - create bitmaps needed for marking page
 * frames that should not be saved and free page frames.  The pointers
 * forbidden_pages_map and free_pages_map are only modified if everything
 * goes well, because we don't want the bits to be used before both bitmaps
 * are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}
/**
 * free_basic_memory_bitmaps - free memory bitmaps allocated by
 * create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 * so that the bitmaps themselves are not referred to while they are being
 * freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}
/**
 * snapshot_additional_pages - estimate the number of additional pages
 * that will be needed for setting up the suspend image data structures
 * for given zone (usually the returned value is greater than the exact
 * number)
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - compute the total number of free highmem
 * pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}
/**
 * saveable_highmem_page - Determine whether a highmem page should be
 * included in the suspend image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't a part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}
/**
 * count_highmem_pages - compute the total number of saveable highmem
 * pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */
/**
 * saveable_page - Determine whether a non-highmem page should be included
 * in the suspend image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't a part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}
/**
 * count_data_pages - compute the total number of saveable non-highmem
 * pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}

	return n;
}
/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}
/**
 * safe_copy_page - check if the page we are going to copy is marked as
 * present in the kernel page tables (this always is the case if
 * CONFIG_DEBUG_PAGEALLOC is not set and in that case
 * kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}
#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/* Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
			pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}
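/*
 * Note (illustrative): the bits in copy_bm were set when the image pages
 * were preallocated, so walking orig_bm and copy_bm in lock step above
 * pairs the n-th saveable page with the n-th preallocated page frame.
 */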
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;
/**
 * swsusp_free - free pages allocated for the suspend.
 *
 * Suspend pages are allocated before the atomic copy is made, so we
 * need to release them after the resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
}
/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}
static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}
#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base)
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */
/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image
 */
static void free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}
}
/**
 * minimum_image_size - Estimate the minimum acceptable size of an image
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_page_state(NR_ACTIVE_ANON)
		+ global_page_state(NR_INACTIVE_ANON)
		+ global_page_state(NR_ACTIVE_FILE)
		+ global_page_state(NR_INACTIVE_FILE)
		- global_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}
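/*
 * Worked example (illustrative numbers): with 200000 saveable pages and
 * 120000 theoretically freeable pages as computed above, the lower bound
 * on the image size is 200000 - 120000 = 80000 pages; if the freeable
 * estimate meets or exceeds the saveable count, the function returns 0.
 */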
/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size, respectively).  To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	struct timeval start, stop;
	int error;

	printk(KERN_INFO "PM: Preallocating image memory... ");
	do_gettimeofday(&start);

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	free_unnecessary_pages();

 out:
	do_gettimeofday(&stop);
	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
	swsusp_show_speed(&start, &stop, pages, "Allocated");

	return 0;

 err_out:
	printk(KERN_CONT "\n");
	swsusp_free();
	return -ENOMEM;
}
#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - compute the number of non-highmem pages
 * that will be necessary for creating copies of highmem pages.
 */

static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */
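/*
 * Worked example (illustrative numbers) for the max_size computation in
 * hibernate_preallocate_memory() above: with count = 250000 usable page
 * frames, size = 1000 metadata pages, PAGES_FOR_IO = 1024 and a
 * reserved_size covering 256 pages,
 *
 *	max_size = (250000 - (1000 + 1024)) / 2 - 2 * 256 = 123476
 *
 * so at most 123476 saveable pages may be left in memory and the rest of
 * the page frames must be preallocated for the image.
 */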
/**
 * enough_free_mem - Make sure we have enough free memory for the
 * snapshot image.
 */

static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
		nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}
#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - if there are some highmem pages in the suspend
 * image, we may need the buffer to copy them and/or load their data.
 */

static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_image_pages - allocate some highmem pages for the image.
 * Try to allocate as many pages as needed, but if the number of free
 * highmem pages is less than that, allocate them all.
 */

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * swsusp_alloc - allocate memory for the suspend image
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */

static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
	     unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk(KERN_INFO "PM: Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Memory allocation failed\n");
		return -ENOMEM;
	}

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
		nr_pages);

	return 0;
}
1877 static int init_header_complete(struct swsusp_info
*info
)
1879 memcpy(&info
->uts
, init_utsname(), sizeof(struct new_utsname
));
1880 info
->version_code
= LINUX_VERSION_CODE
;
1884 static char *check_image_kernel(struct swsusp_info
*info
)
1886 if (info
->version_code
!= LINUX_VERSION_CODE
)
1887 return "kernel version";
1888 if (strcmp(info
->uts
.sysname
,init_utsname()->sysname
))
1889 return "system type";
1890 if (strcmp(info
->uts
.release
,init_utsname()->release
))
1891 return "kernel release";
1892 if (strcmp(info
->uts
.version
,init_utsname()->version
))
1894 if (strcmp(info
->uts
.machine
,init_utsname()->machine
))
1898 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}
/**
 * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 * are stored in the array @buf[] (1 page at a time)
 */
static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}
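/*
 * Note (illustrative): one meta page holds PAGE_SIZE / sizeof(long) PFNs,
 * i.e. 512 on a 64-bit machine with 4 KiB pages, which is why
 * swsusp_save() sets nr_meta_pages to
 * DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE).
 */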
/**
 * snapshot_read_next - used for reading the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure.  The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * On success the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of data stream condition,
 * and a negative number is returned on error.  In such cases the
 * structure pointed to by @handle is not updated and should not be used
 * before the next call to it.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/* Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}
/**
 * mark_unsafe_pages - mark the pages that cannot be used for storing
 * the image during resume, because they conflict with the pages that
 * had been used before suspend
 */

static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				swsusp_unset_page_free(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				swsusp_set_page_free(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}
static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
/**
 * load_header - check the image header and copy data from it
 */
static int
load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/**
 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
 * the corresponding bit in the memory bitmap @bm
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}
/* List of "safe" pages that may be used to store data loaded from the suspend
 * image
 */
static struct linked_page *safe_pages_list;
#ifdef CONFIG_HIGHMEM
/* struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/* List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
/**
 * count_highmem_image_pages - compute the number of highmem pages in the
 * suspend image.  The bits in the memory bitmap @bm that correspond to the
 * image pages are assumed to be set.
 */

static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}
/**
 * prepare_highmem_image - try to allocate as many highmem pages as
 * there are highmem image pages (@nr_highmem_p points to the variable
 * containing the number of highmem image pages).  The pages that are
 * "safe" (ie. will not be overwritten when the suspend image is
 * restored) have the corresponding bits set in @bm (it must be
 * uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem
 * image pages.
 */

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
/**
 * get_highmem_page_buffer - for given highmem image page find the buffer
 * that suspend_write_next() should set for its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */

static struct page *last_highmem_page;

static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
/**
 * copy_last_highmem_page - copy the contents of a highmem image from
 * @buffer, where the caller of snapshot_write_next() has placed them,
 * to the right location represented by @last_highmem_page .
 */

static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	return 0;
}

static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
/**
 * prepare_image - use the memory bitmap @bm to mark the pages that will
 * be overwritten in the process of restoring the system memory state
 * from the suspend image ("unsafe" pages) and allocate memory for the
 * image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for the image data, but not to assign these
 * pages to specific tasks initially.  Instead, we just mark them as
 * allocated and create lists of "safe" pages that will be used
 * later.  On systems with high memory a list of "safe" highmem pages is
 * created too.
 */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
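/*
 * Worked example with illustrative figures (assuming the usual
 * definition LINKED_PAGE_DATA_SIZE == PAGE_SIZE - sizeof(void *), the
 * pointer being the ->next link of a linked page): on a 32-bit box with
 * 4 KiB pages, LINKED_PAGE_DATA_SIZE is 4096 - 4 = 4092 bytes and a
 * struct pbe is three pointers (12 bytes), so PBES_PER_LINKED_PAGE is
 * 4092 / 12 = 341 PBEs per linked page.  The exact figures depend on
 * the architecture's pointer size.
 */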
static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *sp_list, *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/* Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
/**
 * get_buffer - compute the address that snapshot_write_next() should
 * set for its caller to write to.
 */

static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
/**
 * snapshot_write_next - used for writing the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure.  The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * On success the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition,
 * and a negative number is returned on error.  In such cases the
 * structure pointed to by @handle is not updated and should not be used
 * before the next call to it.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}
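/*
 * Sketch of the restore-side loop that drives snapshot_write_next()
 * (hypothetical caller; the real one is the image-loading code in
 * kernel/power/swap.c).  read_image_page() is a made-up stand-in for
 * whatever transport supplies the image data:
 *
 *	int ret;
 *
 *	while ((ret = snapshot_write_next(&handle)) > 0) {
 *		ret = read_image_page(data_of(handle));
 *		if (ret)
 *			break;
 *	}
 *	snapshot_write_finalize(&handle);
 *	if (!ret && !snapshot_image_loaded(&handle))
 *		ret = -ENODATA;
 */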
/**
 * snapshot_write_finalize - must be called after the last call to
 * snapshot_write_next() in case the last page in the image happens
 * to be a highmem page and its contents should be stored in the
 * highmem.  Additionally, it releases the memory that will not be
 * used any more.
 */

void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	/* Free only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
		free_highmem_data();
	}
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}
#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}
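/*
 * The three copy_page() calls above are the page-sized analogue of a
 * plain swap through a temporary:
 *
 *	tmp = a;	(copy_page(buf, kaddr1))
 *	a = b;		(copy_page(kaddr1, kaddr2))
 *	b = tmp;	(copy_page(kaddr2, buf))
 *
 * which is why @buf must point to a "safe" page: it temporarily holds
 * one page of image data during the exchange.
 */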
/**
 * restore_highmem - for each highmem page that was allocated before
 * the suspend and included in the suspend image, and also has been
 * allocated by the "resume" kernel, swap its current (i.e. "before
 * resume") contents with the previous (i.e. "before suspend") one.
 *
 * If the resume eventually fails, we can call this function once
 * again and restore the "before resume" highmem state.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */