// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#define pr_fmt(fmt) "PM: " fmt
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "power.h"
#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
        hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
        hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
        hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
        if (hibernate_restore_protection_active)
                set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
        if (hibernate_restore_protection_active)
                set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
        reserved_size = SPARE_PAGES * PAGE_SIZE;
}
/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
        image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}
/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;
/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE   (PAGE_SIZE - sizeof(void *))

struct linked_page {
        struct linked_page *next;
        char data[LINKED_PAGE_DATA_SIZE];
} __packed;
/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;
#define PG_ANY          0
#define PG_SAFE         1
#define PG_UNSAFE_CLEAR 1
#define PG_UNSAFE_KEEP  0

static unsigned int allocated_unsafe_pages;
/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
        void *res;

        res = (void *)get_zeroed_page(gfp_mask);
        if (safe_needed)
                while (res && swsusp_page_is_free(virt_to_page(res))) {
                        /* The page is unsafe, mark it for swsusp_free() */
                        swsusp_set_page_forbidden(virt_to_page(res));
                        allocated_unsafe_pages++;
                        res = (void *)get_zeroed_page(gfp_mask);
                }
        if (res) {
                swsusp_set_page_forbidden(virt_to_page(res));
                swsusp_set_page_free(virt_to_page(res));
        }
        return res;
}
static void *__get_safe_page(gfp_t gfp_mask)
{
        if (safe_pages_list) {
                void *ret = safe_pages_list;

                safe_pages_list = safe_pages_list->next;
                memset(ret, 0, PAGE_SIZE);
                return ret;
        }
        return get_image_page(gfp_mask, PG_SAFE);
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
        return (unsigned long)__get_safe_page(gfp_mask);
}
static struct page *alloc_image_page(gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_page(gfp_mask);
        if (page) {
                swsusp_set_page_forbidden(page);
                swsusp_set_page_free(page);
        }
        return page;
}
static void recycle_safe_page(void *page_address)
{
        struct linked_page *lp = page_address;

        lp->next = safe_pages_list;
        safe_pages_list = lp;
}
/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
        struct page *page;

        BUG_ON(!virt_addr_valid(addr));

        page = virt_to_page(addr);

        swsusp_unset_page_forbidden(page);
        if (clear_nosave_free)
                swsusp_unset_page_free(page);

        __free_page(page);
}
static inline void free_list_of_pages(struct linked_page *list,
                                      int clear_page_nosave)
{
        while (list) {
                struct linked_page *lp = list->next;

                free_image_page(list, clear_page_nosave);
                list = lp;
        }
}
/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
        struct linked_page *chain;      /* the chain */
        unsigned int used_space;        /* total size of objects allocated out
                                           of the current page */
        gfp_t gfp_mask;         /* mask for allocating pages */
        int safe_needed;        /* if set, only "safe" pages are allocated */
};
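/*
 * Illustrative sketch (not part of this file's build): a typical use of the
 * chain allocator, mirroring what memory_bm_create() does further below.
 * Objects carved out of the chain live until the whole chain is released
 * at once via free_list_of_pages():
 *
 *      struct chain_allocator ca;
 *      struct rtree_node *node;
 *
 *      chain_init(&ca, GFP_KERNEL, PG_ANY);
 *      node = chain_alloc(&ca, sizeof(struct rtree_node));
 *      if (!node)
 *              return -ENOMEM;
 *      ...
 *      free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
 */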
static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
                       int safe_needed)
{
        ca->chain = NULL;
        ca->used_space = LINKED_PAGE_DATA_SIZE;
        ca->gfp_mask = gfp_mask;
        ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
        void *ret;

        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                struct linked_page *lp;

                lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
                                        get_image_page(ca->gfp_mask, PG_ANY);
                if (!lp)
                        return NULL;

                lp->next = ca->chain;
                ca->chain = lp;
                ca->used_space = 0;
        }
        ret = ca->chain->data + ca->used_space;
        ca->used_space += size;
        return ret;
}
/*
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects. The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone. For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits. Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */
#define BM_END_OF_MAP   (~0UL)

#define BM_BITS_PER_BLOCK       (PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT          (PAGE_SHIFT + 3)
#define BM_BLOCK_MASK           ((1UL << BM_BLOCK_SHIFT) - 1)
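/*
 * Worked example for the block/bit arithmetic above, assuming
 * PAGE_SIZE == 4096: BM_BITS_PER_BLOCK = 4096 * 8 = 32768 bits per bitmap
 * page, so for a zone-relative pfn, "pfn >> BM_BLOCK_SHIFT" (pfn / 32768)
 * selects the bitmap block and "pfn & BM_BLOCK_MASK" (pfn % 32768) selects
 * the bit inside that block.
 */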
/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
        struct list_head list;
        unsigned long *data;
};
/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
        struct list_head list;          /* Link Zones together */
        struct list_head nodes;         /* Radix Tree inner nodes */
        struct list_head leaves;        /* Radix Tree leaves */
        unsigned long start_pfn;        /* Zone start page frame */
        unsigned long end_pfn;          /* Zone end page frame + 1 */
        struct rtree_node *rtree;       /* Radix Tree Root */
        int levels;                     /* Number of Radix Tree Levels */
        unsigned int blocks;            /* Number of Bitmap Blocks */
};
/* struct bm_position is used for browsing memory bitmaps */
struct bm_position {
        struct mem_zone_bm_rtree *zone;
        struct rtree_node *node;
        unsigned long node_pfn;
        int node_bit;
};
struct memory_bitmap {
        struct list_head zones;
        struct linked_page *p_list;     /* list of pages used to store zone
                                           bitmap objects and bitmap block
                                           objects */
        struct bm_position cur; /* most recently used bit position */
};
/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL    (PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK     ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
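/*
 * Example of the fan-out implied by these constants: with 4 KiB pages on a
 * 64-bit machine, BM_ENTRIES_PER_LEVEL is 512 and BM_RTREE_LEVEL_SHIFT is 9,
 * so each inner node has up to 512 children and one tree level consumes
 * 9 bits of the block number. Two levels already address 512 * 512 = 262144
 * leaf blocks, i.e. 32 TiB of memory at 128 MiB covered per bitmap block.
 */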
/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
                                           struct chain_allocator *ca,
                                           struct list_head *list)
{
        struct rtree_node *node;

        node = chain_alloc(ca, sizeof(struct rtree_node));
        if (!node)
                return NULL;

        node->data = get_image_page(gfp_mask, safe_needed);
        if (!node->data)
                return NULL;

        list_add_tail(&node->list, list);

        return node;
}
/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
                           int safe_needed, struct chain_allocator *ca)
{
        struct rtree_node *node, *block, **dst;
        unsigned int levels_needed, block_nr;
        int i;

        block_nr = zone->blocks;
        levels_needed = 0;

        /* How many levels do we need for this block nr? */
        while (block_nr) {
                levels_needed += 1;
                block_nr >>= BM_RTREE_LEVEL_SHIFT;
        }

        /* Make sure the rtree has enough levels */
        for (i = zone->levels; i < levels_needed; i++) {
                node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                        &zone->nodes);
                if (!node)
                        return -ENOMEM;

                node->data[0] = (unsigned long)zone->rtree;
                zone->rtree = node;
                zone->levels += 1;
        }

        /* Allocate new block */
        block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
        if (!block)
                return -ENOMEM;

        /* Now walk the rtree to insert the block */
        node = zone->rtree;
        dst = &zone->rtree;
        block_nr = zone->blocks;
        for (i = zone->levels; i > 0; i--) {
                int index;

                if (!node) {
                        node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                                &zone->nodes);
                        if (!node)
                                return -ENOMEM;
                        *dst = node;
                }

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                dst = (struct rtree_node **)&((*dst)->data[index]);
                node = *dst;
        }

        zone->blocks += 1;
        *dst = block;

        return 0;
}
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free);
/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
                                                      int safe_needed,
                                                      struct chain_allocator *ca,
                                                      unsigned long start,
                                                      unsigned long end)
{
        struct mem_zone_bm_rtree *zone;
        unsigned int i, nr_blocks;
        unsigned long pages;

        pages = end - start;
        zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
        if (!zone)
                return NULL;

        INIT_LIST_HEAD(&zone->nodes);
        INIT_LIST_HEAD(&zone->leaves);
        zone->start_pfn = start;
        zone->end_pfn = end;
        nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

        for (i = 0; i < nr_blocks; i++) {
                if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
                        free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
                        return NULL;
                }
        }

        return zone;
}
/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                free_image_page(node->data, clear_nosave_free);

        list_for_each_entry(node, &zone->leaves, list)
                free_image_page(node->data, clear_nosave_free);
}
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
        bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
                                  list);
        bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                  struct rtree_node, list);
        bm->cur.node_pfn = 0;
        bm->cur.node_bit = 0;
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
struct mem_extent {
        struct list_head hook;
        unsigned long start, end;
};
/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
        struct mem_extent *ext, *aux;

        list_for_each_entry_safe(ext, aux, list, hook) {
                list_del(&ext->hook);
                kfree(ext);
        }
}
/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
        struct zone *zone;

        INIT_LIST_HEAD(list);

        for_each_populated_zone(zone) {
                unsigned long zone_start, zone_end;
                struct mem_extent *ext, *cur, *aux;

                zone_start = zone->zone_start_pfn;
                zone_end = zone_end_pfn(zone);

                list_for_each_entry(ext, list, hook)
                        if (zone_start <= ext->end)
                                break;

                if (&ext->hook == list || zone_end < ext->start) {
                        /* New extent is necessary */
                        struct mem_extent *new_ext;

                        new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
                        if (!new_ext) {
                                free_mem_extents(list);
                                return -ENOMEM;
                        }
                        new_ext->start = zone_start;
                        new_ext->end = zone_end;
                        list_add_tail(&new_ext->hook, &ext->hook);
                        continue;
                }

                /* Merge this zone's range of PFNs with the existing one */
                if (zone_start < ext->start)
                        ext->start = zone_start;
                if (zone_end > ext->end)
                        ext->end = zone_end;

                /* More merging may be possible */
                cur = ext;
                list_for_each_entry_safe_continue(cur, aux, list, hook) {
                        if (zone_end < cur->start)
                                break;
                        if (zone_end < cur->end)
                                ext->end = cur->end;
                        list_del(&cur->hook);
                        kfree(cur);
                }
        }

        return 0;
}
/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
                            int safe_needed)
{
        struct chain_allocator ca;
        struct list_head mem_extents;
        struct mem_extent *ext;
        int error;

        chain_init(&ca, gfp_mask, safe_needed);
        INIT_LIST_HEAD(&bm->zones);

        error = create_mem_extents(&mem_extents, gfp_mask);
        if (error)
                return error;

        list_for_each_entry(ext, &mem_extents, hook) {
                struct mem_zone_bm_rtree *zone;

                zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
                                            ext->start, ext->end);
                if (!zone) {
                        error = -ENOMEM;
                        goto Error;
                }
                list_add_tail(&zone->list, &bm->zones);
        }

        bm->p_list = ca.chain;
        memory_bm_position_reset(bm);
 Exit:
        free_mem_extents(&mem_extents);
        return error;

 Error:
        bm->p_list = ca.chain;
        memory_bm_free(bm, PG_UNSAFE_CLEAR);
        goto Exit;
}
/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
        struct mem_zone_bm_rtree *zone;

        list_for_each_entry(zone, &bm->zones, list)
                free_zone_bm_rtree(zone, clear_nosave_free);

        free_list_of_pages(bm->p_list, clear_nosave_free);

        INIT_LIST_HEAD(&bm->zones);
}
/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
                              void **addr, unsigned int *bit_nr)
{
        struct mem_zone_bm_rtree *curr, *zone;
        struct rtree_node *node;
        int i, block_nr;

        zone = bm->cur.zone;

        if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
                goto zone_found;

        zone = NULL;

        /* Find the right zone */
        list_for_each_entry(curr, &bm->zones, list) {
                if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
                        zone = curr;
                        break;
                }
        }

        if (!zone)
                return -EFAULT;

zone_found:
        /*
         * We have found the zone. Now walk the radix tree to find the leaf
         * node for our PFN.
         *
         * If the zone we wish to scan is the current zone and the
         * pfn falls into the current node then we do not need to walk
         * the tree.
         */
        node = bm->cur.node;
        if (zone == bm->cur.zone &&
            ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
                goto node_found;

        node = zone->rtree;
        block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

        for (i = zone->levels; i > 0; i--) {
                int index;

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                BUG_ON(node->data[index] == 0);
                node = (struct rtree_node *)node->data[index];
        }

node_found:
        /* Update last position */
        bm->cur.zone = zone;
        bm->cur.node = node;
        bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

        /* Set return values */
        *addr = node->data;
        *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

        return 0;
}
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        set_bit(bit, addr);
}
static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        if (!error)
                set_bit(bit, addr);

        return error;
}
static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        clear_bit(bit, addr);
}
static void memory_bm_clear_current(struct memory_bitmap *bm)
{
        int bit;

        bit = max(bm->cur.node_bit - 1, 0);
        clear_bit(bit, bm->cur.node->data);
}
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        return test_bit(bit, addr);
}
static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;

        return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}
/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
        if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
                bm->cur.node = list_entry(bm->cur.node->list.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn += BM_BITS_PER_BLOCK;
                bm->cur.node_bit = 0;
                touch_softlockup_watchdog();
                return true;
        }

        /* No more nodes, goto next zone */
        if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
                bm->cur.zone = list_entry(bm->cur.zone->list.next,
                                          struct mem_zone_bm_rtree, list);
                bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn = 0;
                bm->cur.node_bit = 0;
                return true;
        }

        /* No more zones */
        return false;
}
/**
 * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it. If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
        unsigned long bits, pfn, pages;
        int bit;

        do {
                pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
                bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
                bit = find_next_bit(bm->cur.node->data, bits,
                                    bm->cur.node_bit);
                if (bit < bits) {
                        pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
                        bm->cur.node_bit = bit + 1;
                        return pfn;
                }
        } while (rtree_next_node(bm));

        return BM_END_OF_MAP;
}
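/*
 * Illustrative sketch (not part of this file's build): walking every set bit
 * of a bitmap with the iterator above, as swsusp_free() and
 * duplicate_memory_bitmap() do. do_something() stands for arbitrary
 * per-page work:
 *
 *      unsigned long pfn;
 *
 *      memory_bm_position_reset(bm);
 *      for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *           pfn = memory_bm_next_pfn(bm))
 *              do_something(pfn_to_page(pfn));
 */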
/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);
static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                recycle_safe_page(node->data);

        list_for_each_entry(node, &zone->leaves, list)
                recycle_safe_page(node->data);
}
static void memory_bm_recycle(struct memory_bitmap *bm)
{
        struct mem_zone_bm_rtree *zone;
        struct linked_page *p_list;

        list_for_each_entry(zone, &bm->zones, list)
                recycle_zone_bm_rtree(zone);

        p_list = bm->p_list;
        while (p_list) {
                struct linked_page *lp = p_list;

                p_list = lp->next;
                recycle_safe_page(lp);
        }
}
/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
                                     unsigned long end_pfn, int use_kmalloc)
{
        struct nosave_region *region;

        if (start_pfn >= end_pfn)
                return;

        if (!list_empty(&nosave_regions)) {
                /* Try to extend the previous region (they should be sorted) */
                region = list_entry(nosave_regions.prev,
                                    struct nosave_region, list);
                if (region->end_pfn == start_pfn) {
                        region->end_pfn = end_pfn;
                        goto Report;
                }
        }
        if (use_kmalloc) {
                /* During init, this shouldn't fail */
                region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
                BUG_ON(!region);
        } else {
                /* This allocation cannot fail */
                region = memblock_alloc(sizeof(struct nosave_region),
                                        SMP_CACHE_BYTES);
                if (!region)
                        panic("%s: Failed to allocate %zu bytes\n", __func__,
                              sizeof(struct nosave_region));
        }
        region->start_pfn = start_pfn;
        region->end_pfn = end_pfn;
        list_add_tail(&region->list, &nosave_regions);
 Report:
        pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
                (unsigned long long) start_pfn << PAGE_SHIFT,
                ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
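/*
 * Illustrative sketch (hypothetical caller, not part of this file): early
 * platform code typically reaches this through the register_nosave_region()
 * wrapper, e.g. to exclude a firmware-reserved physical range, where
 * fw_start/fw_end are assumed byte addresses:
 *
 *      register_nosave_region(PFN_DOWN(fw_start), PFN_UP(fw_end));
 */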
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;
/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
        return free_pages_map ?
                memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
        return forbidden_pages_map ?
                memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}
/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
        struct nosave_region *region;

        if (list_empty(&nosave_regions))
                return;

        list_for_each_entry(region, &nosave_regions, list) {
                unsigned long pfn;

                pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
                         (unsigned long long) region->start_pfn << PAGE_SHIFT,
                         ((unsigned long long) region->end_pfn << PAGE_SHIFT)
                                - 1);

                for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
                        if (pfn_valid(pfn)) {
                                /*
                                 * It is safe to ignore the result of
                                 * mem_bm_set_bit_check() here, since we won't
                                 * touch the PFNs for which the error is
                                 * returned anyway.
                                 */
                                mem_bm_set_bit_check(bm, pfn);
                        }
        }
}
/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames. The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;
        int error = 0;

        if (forbidden_pages_map && free_pages_map)
                return 0;
        else
                BUG_ON(forbidden_pages_map || free_pages_map);

        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm1)
                return -ENOMEM;

        error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_first_object;

        bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm2)
                goto Free_first_bitmap;

        error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_second_object;

        forbidden_pages_map = bm1;
        free_pages_map = bm2;
        mark_nosave_pages(forbidden_pages_map);

        pr_debug("Basic memory bitmaps created\n");

        return 0;

 Free_second_object:
        kfree(bm2);
 Free_first_bitmap:
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
        kfree(bm1);
        return -ENOMEM;
}
/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;

        if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
                return;

        bm1 = forbidden_pages_map;
        bm2 = free_pages_map;
        forbidden_pages_map = NULL;
        free_pages_map = NULL;
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
        kfree(bm1);
        memory_bm_free(bm2, PG_UNSAFE_CLEAR);
        kfree(bm2);

        pr_debug("Basic memory bitmaps freed\n");
}
void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
        struct memory_bitmap *bm = free_pages_map;
        unsigned long pfn;

        if (WARN_ON(!(free_pages_map)))
                return;

        memory_bm_position_reset(bm);
        pfn = memory_bm_next_pfn(bm);
        while (pfn != BM_END_OF_MAP) {
                if (pfn_valid(pfn))
                        clear_highpage(pfn_to_page(pfn));

                pfn = memory_bm_next_pfn(bm);
        }
        memory_bm_position_reset(bm);
        pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}
/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up a hibernation
 * image data structures for @zone (usually, the returned value is greater than
 * the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
        unsigned int rtree, nodes;

        rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
        rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
                              LINKED_PAGE_DATA_SIZE);
        while (nodes > 1) {
                nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
                rtree += nodes;
        }

        return 2 * rtree;
}
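/*
 * Worked example, assuming 4 KiB pages, 64-bit longs and a zone spanning
 * 1 GiB (262144 pages): 262144 / 32768 = 8 leaf bitmap blocks, plus one
 * page for the eight struct rtree_node wrappers and one inner node, i.e.
 * 10 pages per bitmap and a return value of 20. The factor of two accounts
 * for the two bitmaps (orig_bm and copy_bm) that
 * hibernate_preallocate_memory() creates.
 */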
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
        struct zone *zone;
        unsigned int cnt = 0;

        for_each_populated_zone(zone)
                if (is_highmem(zone))
                        cnt += zone_page_state(zone, NR_FREE_PAGES);

        return cnt;
}
/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_online_page(pfn);
        if (!page || page_zone(page) != zone)
                return NULL;

        BUG_ON(!PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
                return NULL;

        if (PageReserved(page) || PageOffline(page))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}
/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
        struct zone *zone;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                unsigned long pfn, max_zone_pfn;

                if (!is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_highmem_page(zone, pfn))
                                n++;
        }
        return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
        return NULL;
}
#endif /* CONFIG_HIGHMEM */
/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_online_page(pfn);
        if (!page || page_zone(page) != zone)
                return NULL;

        BUG_ON(PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
                return NULL;

        if (PageOffline(page))
                return NULL;

        if (PageReserved(page)
            && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}
/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                if (is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_page(zone, pfn))
                                n++;
        }
        return n;
}
/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
        int n;

        for (n = PAGE_SIZE / sizeof(long); n; n--)
                *dst++ = *src++;
}
/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
 * always returns 'true'.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
        if (kernel_page_present(s_page)) {
                do_copy_page(dst, page_address(s_page));
        } else {
                kernel_map_pages(s_page, 1, 1);
                do_copy_page(dst, page_address(s_page));
                kernel_map_pages(s_page, 1, 0);
        }
}
#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
        return is_highmem(zone) ?
                saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        struct page *s_page, *d_page;
        void *src, *dst;

        s_page = pfn_to_page(src_pfn);
        d_page = pfn_to_page(dst_pfn);
        if (PageHighMem(s_page)) {
                src = kmap_atomic(s_page);
                dst = kmap_atomic(d_page);
                do_copy_page(dst, src);
                kunmap_atomic(dst);
                kunmap_atomic(src);
        } else {
                if (PageHighMem(d_page)) {
                        /*
                         * The page pointed to by src may contain some kernel
                         * data modified by kmap_atomic()
                         */
                        safe_copy_page(buffer, s_page);
                        dst = kmap_atomic(d_page);
                        copy_page(dst, buffer);
                        kunmap_atomic(dst);
                } else {
                        safe_copy_page(page_address(d_page), s_page);
                }
        }
}
#else
#define page_is_saveable(zone, pfn)     saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        safe_copy_page(page_address(pfn_to_page(dst_pfn)),
                       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */
static void copy_data_pages(struct memory_bitmap *copy_bm,
                            struct memory_bitmap *orig_bm)
{
        struct zone *zone;
        unsigned long pfn;

        for_each_populated_zone(zone) {
                unsigned long max_zone_pfn;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (page_is_saveable(zone, pfn))
                                memory_bm_set_bit(orig_bm, pfn);
        }
        memory_bm_position_reset(orig_bm);
        memory_bm_position_reset(copy_bm);
        for (;;) {
                pfn = memory_bm_next_pfn(orig_bm);
                if (unlikely(pfn == BM_END_OF_MAP))
                        break;
                copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
        }
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;
/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
        unsigned long fb_pfn, fr_pfn;

        if (!forbidden_pages_map || !free_pages_map)
                goto out;

        memory_bm_position_reset(forbidden_pages_map);
        memory_bm_position_reset(free_pages_map);

loop:
        fr_pfn = memory_bm_next_pfn(free_pages_map);
        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

        /*
         * Find the next bit set in both bitmaps. This is guaranteed to
         * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
         */
        do {
                if (fb_pfn < fr_pfn)
                        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
                if (fr_pfn < fb_pfn)
                        fr_pfn = memory_bm_next_pfn(free_pages_map);
        } while (fb_pfn != fr_pfn);

        if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
                struct page *page = pfn_to_page(fr_pfn);

                memory_bm_clear_current(forbidden_pages_map);
                memory_bm_clear_current(free_pages_map);
                hibernate_restore_unprotect_page(page_address(page));
                __free_page(page);
                goto loop;
        }

out:
        nr_copy_pages = 0;
        nr_meta_pages = 0;
        restore_pblist = NULL;
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
        hibernate_restore_protection_end();
}
/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE       (GFP_KERNEL | __GFP_NOWARN)
/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
        unsigned long nr_alloc = 0;

        while (nr_pages > 0) {
                struct page *page;

                page = alloc_image_page(mask);
                if (!page)
                        break;
                memory_bm_set_bit(&copy_bm, page_to_pfn(page));
                if (PageHighMem(page))
                        alloc_highmem++;
                else
                        alloc_normal++;
                nr_pages--;
                nr_alloc++;
        }

        return nr_alloc;
}
static unsigned long preallocate_image_memory(unsigned long nr_pages,
                                              unsigned long avail_normal)
{
        unsigned long alloc;

        if (avail_normal <= alloc_normal)
                return 0;

        alloc = avail_normal - alloc_normal;
        if (nr_pages < alloc)
                alloc = nr_pages;

        return preallocate_image_pages(alloc, GFP_IMAGE);
}
#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
        return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}
/**
 *  __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
        x *= multiplier;
        do_div(x, base);
        return (unsigned long)x;
}
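/*
 * Example: __fraction(1000, 3, 8) computes 1000 * 3 / 8 = 375. The
 * multiplication is done before the division so the intermediate value
 * keeps its precision, and do_div() is used because a plain 64-bit
 * division is not available directly on 32-bit architectures.
 */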
static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
                                                  unsigned long highmem,
                                                  unsigned long total)
{
        unsigned long alloc = __fraction(nr_pages, highmem, total);

        return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
        return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
                                                          unsigned long highmem,
                                                          unsigned long total)
{
        return 0;
}
#endif /* CONFIG_HIGHMEM */
/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
        unsigned long save, to_free_normal, to_free_highmem, free;

        save = count_data_pages();
        if (alloc_normal >= save) {
                to_free_normal = alloc_normal - save;
                save = 0;
        } else {
                to_free_normal = 0;
                save -= alloc_normal;
        }
        save += count_highmem_pages();
        if (alloc_highmem >= save) {
                to_free_highmem = alloc_highmem - save;
        } else {
                to_free_highmem = 0;
                save -= alloc_highmem;
                if (to_free_normal > save)
                        to_free_normal -= save;
                else
                        to_free_normal = 0;
        }
        free = to_free_normal + to_free_highmem;

        memory_bm_position_reset(&copy_bm);
        while (to_free_normal > 0 || to_free_highmem > 0) {
                unsigned long pfn = memory_bm_next_pfn(&copy_bm);
                struct page *page = pfn_to_page(pfn);

                if (PageHighMem(page)) {
                        if (!to_free_highmem)
                                continue;
                        to_free_highmem--;
                        alloc_highmem--;
                } else {
                        if (!to_free_normal)
                                continue;
                        to_free_normal--;
                        alloc_normal--;
                }
                memory_bm_clear_bit(&copy_bm, pfn);
                swsusp_unset_page_forbidden(page);
                swsusp_unset_page_free(page);
                __free_page(page);
        }

        return free;
}
/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
        unsigned long size;

        size = global_node_page_state(NR_SLAB_RECLAIMABLE)
                + global_node_page_state(NR_ACTIVE_ANON)
                + global_node_page_state(NR_INACTIVE_ANON)
                + global_node_page_state(NR_ACTIVE_FILE)
                + global_node_page_state(NR_INACTIVE_FILE);

        return saveable <= size ? 0 : saveable - size;
}
/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size, respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
        struct zone *zone;
        unsigned long saveable, size, max_size, count, highmem, pages = 0;
        unsigned long alloc, save_highmem, pages_highmem, avail_normal;
        ktime_t start, stop;
        int error;

        pr_info("Preallocating image memory... ");
        start = ktime_get();

        error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
        if (error)
                goto err_out;

        error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
        if (error)
                goto err_out;

        alloc_normal = 0;
        alloc_highmem = 0;

        /* Count the number of saveable data pages. */
        save_highmem = count_highmem_pages();
        saveable = count_data_pages();

        /*
         * Compute the total number of page frames we can use (count) and the
         * number of pages needed for image metadata (size).
         */
        count = saveable;
        saveable += save_highmem;
        highmem = save_highmem;
        size = 0;
        for_each_populated_zone(zone) {
                size += snapshot_additional_pages(zone);
                if (is_highmem(zone))
                        highmem += zone_page_state(zone, NR_FREE_PAGES);
                else
                        count += zone_page_state(zone, NR_FREE_PAGES);
        }
        avail_normal = count;
        count += highmem;
        count -= totalreserve_pages;

        /* Add number of pages required for page keys (s390 only). */
        size += page_key_additional_pages(saveable);

        /* Compute the maximum number of saveable pages to leave in memory. */
        max_size = (count - (size + PAGES_FOR_IO)) / 2
                        - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
        /* Compute the desired number of image pages specified by image_size. */
        size = DIV_ROUND_UP(image_size, PAGE_SIZE);
        if (size > max_size)
                size = max_size;
        /*
         * If the desired number of image pages is at least as large as the
         * current number of saveable pages in memory, allocate page frames for
         * the image and we're done.
         */
        if (size >= saveable) {
                pages = preallocate_image_highmem(save_highmem);
                pages += preallocate_image_memory(saveable - pages, avail_normal);
                goto out;
        }

        /* Estimate the minimum size of the image. */
        pages = minimum_image_size(saveable);
        /*
         * To avoid excessive pressure on the normal zone, leave room in it to
         * accommodate an image of the minimum size (unless it's already too
         * small, in which case don't preallocate pages from it at all).
         */
        if (avail_normal > pages)
                avail_normal -= pages;
        else
                avail_normal = 0;
        if (size < pages)
                size = min_t(unsigned long, pages, max_size);

        /*
         * Let the memory management subsystem know that we're going to need a
         * large number of page frames to allocate and make it free some memory.
         * NOTE: If this is not done, performance will be hurt badly in some
         * test cases.
         */
        shrink_all_memory(saveable - size);

        /*
         * The number of saveable pages in memory was too high, so apply some
         * pressure to decrease it. First, make room for the largest possible
         * image and fail if that doesn't work. Next, try to decrease the size
         * of the image as much as indicated by 'size' using allocations from
         * highmem and non-highmem zones separately.
         */
        pages_highmem = preallocate_image_highmem(highmem / 2);
        alloc = count - max_size;
        if (alloc > pages_highmem)
                alloc -= pages_highmem;
        else
                alloc = 0;
        pages = preallocate_image_memory(alloc, avail_normal);
        if (pages < alloc) {
                /* We have exhausted non-highmem pages, try highmem. */
                alloc -= pages;
                pages += pages_highmem;
                pages_highmem = preallocate_image_highmem(alloc);
                if (pages_highmem < alloc)
                        goto err_out;
                pages += pages_highmem;
                /*
                 * size is the desired number of saveable pages to leave in
                 * memory, so try to preallocate (all memory - size) pages.
                 */
                alloc = (count - pages) - size;
                pages += preallocate_image_highmem(alloc);
        } else {
                /*
                 * There are approximately max_size saveable pages at this point
                 * and we want to reduce this number down to size.
                 */
                alloc = max_size - size;
                size = preallocate_highmem_fraction(alloc, highmem, count);
                pages_highmem += size;
                alloc -= size;
                size = preallocate_image_memory(alloc, avail_normal);
                pages_highmem += preallocate_image_highmem(alloc - size);
                pages += pages_highmem + size;
        }

        /*
         * We only need as many page frames for the image as there are saveable
         * pages in memory, but we have allocated more. Release the excessive
         * ones now.
         */
        pages -= free_unnecessary_pages();

 out:
        stop = ktime_get();
        pr_cont("done (allocated %lu pages)\n", pages);
        swsusp_show_speed(start, stop, pages, "Allocated");

        return 0;

 err_out:
        pr_cont("\n");
        swsusp_free();
        return -ENOMEM;
}
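/*
 * Worked example for the max_size formula above (the numbers are only
 * illustrative): with count = 1000000 available page frames, size = 2000
 * metadata pages, PAGES_FOR_IO = 1024 and reserved_size equal to 2 MiB
 * (512 pages at 4 KiB),
 *
 *      max_size = (1000000 - (2000 + 1024)) / 2 - 2 * 512 = 497464
 *
 * saveable pages may stay in memory; everything above that has to be
 * preallocated (and thereby freed) before the snapshot is taken.
 */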
#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
        unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

        if (free_highmem >= nr_highmem)
                nr_highmem = 0;
        else
                nr_highmem -= free_highmem;

        return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
        struct zone *zone;
        unsigned int free = alloc_normal;

        for_each_populated_zone(zone)
                if (!is_highmem(zone))
                        free += zone_page_state(zone, NR_FREE_PAGES);

        nr_pages += count_pages_for_highmem(nr_highmem);
        pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
                 nr_pages, PAGES_FOR_IO, free);

        return free > nr_pages + PAGES_FOR_IO;
}
#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
        buffer = get_image_page(GFP_ATOMIC, safe_needed);
        return buffer ? 0 : -ENOMEM;
}
/**
 * alloc_highmem_image_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
                                               unsigned int nr_highmem)
{
        unsigned int to_alloc = count_free_highmem_pages();

        if (to_alloc > nr_highmem)
                to_alloc = nr_highmem;

        nr_highmem -= to_alloc;
        while (to_alloc-- > 0) {
                struct page *page;

                page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
                memory_bm_set_bit(bm, page_to_pfn(page));
        }
        return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
                                               unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system. If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
                        unsigned int nr_pages, unsigned int nr_highmem)
{
        if (nr_highmem > 0) {
                if (get_highmem_buffer(PG_ANY))
                        goto err_out;
                if (nr_highmem > alloc_highmem) {
                        nr_highmem -= alloc_highmem;
                        nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
                }
        }
        if (nr_pages > alloc_normal) {
                nr_pages -= alloc_normal;
                while (nr_pages-- > 0) {
                        struct page *page;

                        page = alloc_image_page(GFP_ATOMIC);
                        if (!page)
                                goto err_out;
                        memory_bm_set_bit(copy_bm, page_to_pfn(page));
                }
        }

        return 0;

 err_out:
        swsusp_free();
        return -ENOMEM;
}
asmlinkage __visible int swsusp_save(void)
{
        unsigned int nr_pages, nr_highmem;

        pr_info("Creating hibernation image:\n");

        drain_local_pages(NULL);
        nr_pages = count_data_pages();
        nr_highmem = count_highmem_pages();
        pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

        if (!enough_free_mem(nr_pages, nr_highmem)) {
                pr_err("Not enough free memory\n");
                return -ENOMEM;
        }

        if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
                pr_err("Memory allocation failed\n");
                return -ENOMEM;
        }

        /*
         * During the allocation of the suspend pagedir, new cold pages may
         * appear. Kill them.
         */
        drain_local_pages(NULL);
        copy_data_pages(&copy_bm, &orig_bm);

        /*
         * End of critical section. From now on, we can write to memory,
         * but we should not touch disk. This specially means we must _not_
         * touch swap space! Except we must write out our image of course.
         */

        nr_pages += nr_highmem;
        nr_copy_pages = nr_pages;
        nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

        pr_info("Hibernation image created (%d pages copied)\n", nr_pages);

        return 0;
}
#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
        memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
        info->version_code = LINUX_VERSION_CODE;
        return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
        if (info->version_code != LINUX_VERSION_CODE)
                return "kernel version";
        if (strcmp(info->uts.sysname, init_utsname()->sysname))
                return "system type";
        if (strcmp(info->uts.release, init_utsname()->release))
                return "kernel release";
        if (strcmp(info->uts.version, init_utsname()->version))
                return "version";
        if (strcmp(info->uts.machine, init_utsname()->machine))
                return "machine";
        return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
unsigned long snapshot_get_image_size(void)
{
        return nr_copy_pages + nr_meta_pages + 1;
}
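/*
 * The image layout implied by this accounting, in pages:
 *
 *      1 header page (struct swsusp_info)
 *      nr_meta_pages of packed PFNs (512 PFNs per page with 8-byte longs)
 *      nr_copy_pages of data
 *
 * For example, an image with 100000 data pages needs
 * DIV_ROUND_UP(100000 * 8, 4096) = 196 metadata pages, so
 * snapshot_get_image_size() returns 100197.
 */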
static int init_header(struct swsusp_info *info)
{
        memset(info, 0, sizeof(struct swsusp_info));
        info->num_physpages = get_num_physpages();
        info->image_pages = nr_copy_pages;
        info->pages = snapshot_get_image_size();
        info->size = info->pages;
        info->size <<= PAGE_SHIFT;
        return init_header_complete(info);
}
/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
        int j;

        for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
                buf[j] = memory_bm_next_pfn(bm);
                if (unlikely(buf[j] == BM_END_OF_MAP))
                        break;
                /* Save page key for data page (s390 only). */
                page_key_read(buf + j);
        }
}
/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number. Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors. If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
        if (handle->cur > nr_meta_pages + nr_copy_pages)
                return 0;

        if (!buffer) {
                /* This makes the buffer be freed by swsusp_free() */
                buffer = get_image_page(GFP_ATOMIC, PG_ANY);
                if (!buffer)
                        return -ENOMEM;
        }
        if (!handle->cur) {
                int error;

                error = init_header((struct swsusp_info *)buffer);
                if (error)
                        return error;
                handle->buffer = buffer;
                memory_bm_position_reset(&orig_bm);
                memory_bm_position_reset(&copy_bm);
        } else if (handle->cur <= nr_meta_pages) {
                clear_page(buffer);
                pack_pfns(buffer, &orig_bm);
        } else {
                struct page *page;

                page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
                if (PageHighMem(page)) {
                        /*
                         * Highmem pages are copied to the buffer,
                         * because we can't return with a kmapped
                         * highmem page (we may not be called again).
                         */
                        void *kaddr;

                        kaddr = kmap_atomic(page);
                        copy_page(buffer, kaddr);
                        kunmap_atomic(kaddr);
                        handle->buffer = buffer;
                } else {
                        handle->buffer = page_address(page);
                }
        }
        handle->cur++;
        return PAGE_SIZE;
}
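/*
 * Illustrative sketch (not part of this file's build): the read side used by
 * the image writer roughly follows this pattern, with data_of() yielding the
 * address announced through the handle; write_page_somewhere() is a
 * hypothetical sink:
 *
 *      struct snapshot_handle handle;
 *      int ret;
 *
 *      memset(&handle, 0, sizeof(handle));
 *      while ((ret = snapshot_read_next(&handle)) > 0)
 *              write_page_somewhere(data_of(handle));
 *      return ret;     // 0 on end of stream, negative on error
 */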
static void duplicate_memory_bitmap(struct memory_bitmap *dst,
                                    struct memory_bitmap *src)
{
        unsigned long pfn;

        memory_bm_position_reset(src);
        pfn = memory_bm_next_pfn(src);
        while (pfn != BM_END_OF_MAP) {
                memory_bm_set_bit(dst, pfn);
                pfn = memory_bm_next_pfn(src);
        }
}
/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
        unsigned long pfn;

        /* Clear the "free"/"unsafe" bit for all PFNs */
        memory_bm_position_reset(free_pages_map);
        pfn = memory_bm_next_pfn(free_pages_map);
        while (pfn != BM_END_OF_MAP) {
                memory_bm_clear_current(free_pages_map);
                pfn = memory_bm_next_pfn(free_pages_map);
        }

        /* Mark pages that correspond to the "original" PFNs as "unsafe" */
        duplicate_memory_bitmap(free_pages_map, bm);

        allocated_unsafe_pages = 0;
}
static int check_header(struct swsusp_info *info)
{
        char *reason;

        reason = check_image_kernel(info);
        if (!reason && info->num_physpages != get_num_physpages())
                reason = "memory size";
        if (reason) {
                pr_err("Image mismatch: %s\n", reason);
                return -EPERM;
        }
        return 0;
}
/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
        int error;

        restore_pblist = NULL;
        error = check_header(info);
        if (!error) {
                nr_copy_pages = info->image_pages;
                nr_meta_pages = info->pages - info->image_pages - 1;
        }
        return error;
}
/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
        int j;

        for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
                if (unlikely(buf[j] == BM_END_OF_MAP))
                        break;

                /* Extract and buffer page key for data page (s390 only). */
                page_key_memorize(buf + j);

                if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
                        memory_bm_set_bit(bm, buf[j]);
                else
                        return -EFAULT;
        }

        return 0;
}
#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
        struct page *copy_page; /* data is here now */
        struct page *orig_page; /* data was here before the suspend */
        struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
        unsigned long pfn;
        unsigned int cnt = 0;

        memory_bm_position_reset(bm);
        pfn = memory_bm_next_pfn(bm);
        while (pfn != BM_END_OF_MAP) {
                if (PageHighMem(pfn_to_page(pfn)))
                        cnt++;

                pfn = memory_bm_next_pfn(bm);
        }
        return cnt;
}
static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;
/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages).  The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
static struct page *last_highmem_page;
/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that suspend_write_next() should
 * return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
/**
 * copy_last_highmem_page - Copy the most recently loaded highmem image page.
 *
 * Copy the contents of a highmem image page from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}
static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}
static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
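/*
 * Worked example (illustrative; assumes 4 KiB pages and 64-bit pointers):
 * LINKED_PAGE_DATA_SIZE is 4096 - 8 == 4088 bytes and struct pbe holds
 * three pointers, i.e. 24 bytes, so PBES_PER_LINKED_PAGE is
 * 4088 / 24 == 170 PBEs per linked page.
 */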
/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet.  Instead, we mark them all as allocated and
 * create lists of "safe" pages to be used later.  On systems with high
 * memory a list of "safe" highmem pages is created too.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
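/*
 * A minimal sketch of how the restore_pblist built above might be replayed
 * later (purely an illustration; the real replay is performed by
 * arch-specific code once the image has been fully loaded):
 *
 *	struct pbe *p;
 *
 *	for (p = restore_pblist; p; p = p->next)
 *		copy_page(p->orig_address, p->address);
 */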
/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the writing.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function on every subsequent call.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition.  Negative
 * numbers are returned on errors, in which case the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}
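/*
 * A minimal sketch of the caller protocol described above (hypothetical
 * loop; error handling trimmed).  read_page() is a stand-in for whatever
 * fills data_of(handle) from the image stream; a return of 0 from
 * snapshot_write_next() means "end of file", a negative value an error:
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	for (;;) {
 *		ret = snapshot_write_next(&handle);
 *		if (ret <= 0)
 *			break;
 *		if (read_page(data_of(handle), ret) < 0)
 *			break;
 *	}
 *	snapshot_write_finalize(&handle);
 */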
/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem.  Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}
#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}
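/*
 * The three copy_page() calls above implement a swap through a bounce
 * buffer: buf <- p1, p1 <- p2, p2 <- buf.  Starting from p1 = A and p2 = B,
 * the sequence leaves p1 = B and p2 = A, with @buf holding A afterwards.
 */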
/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */