1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/kernel/power/snapshot.c
5 * This file provides system snapshot/restore functionality for swsusp.
7 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
9 */
11 #define pr_fmt(fmt) "PM: " fmt
13 #include <linux/version.h>
14 #include <linux/module.h>
15 #include <linux/mm.h>
16 #include <linux/suspend.h>
17 #include <linux/delay.h>
18 #include <linux/bitops.h>
19 #include <linux/spinlock.h>
20 #include <linux/kernel.h>
21 #include <linux/pm.h>
22 #include <linux/device.h>
23 #include <linux/init.h>
24 #include <linux/memblock.h>
25 #include <linux/nmi.h>
26 #include <linux/syscalls.h>
27 #include <linux/console.h>
28 #include <linux/highmem.h>
29 #include <linux/list.h>
30 #include <linux/slab.h>
31 #include <linux/compiler.h>
32 #include <linux/ktime.h>
33 #include <linux/set_memory.h>
35 #include <linux/uaccess.h>
36 #include <asm/mmu_context.h>
37 #include <asm/pgtable.h>
38 #include <asm/tlbflush.h>
39 #include <asm/io.h>
41 #include "power.h"
43 #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
44 static bool hibernate_restore_protection;
45 static bool hibernate_restore_protection_active;
47 void enable_restore_image_protection(void)
49 hibernate_restore_protection = true;
52 static inline void hibernate_restore_protection_begin(void)
54 hibernate_restore_protection_active = hibernate_restore_protection;
57 static inline void hibernate_restore_protection_end(void)
59 hibernate_restore_protection_active = false;
62 static inline void hibernate_restore_protect_page(void *page_address)
64 if (hibernate_restore_protection_active)
65 set_memory_ro((unsigned long)page_address, 1);
68 static inline void hibernate_restore_unprotect_page(void *page_address)
70 if (hibernate_restore_protection_active)
71 set_memory_rw((unsigned long)page_address, 1);
73 #else
74 static inline void hibernate_restore_protection_begin(void) {}
75 static inline void hibernate_restore_protection_end(void) {}
76 static inline void hibernate_restore_protect_page(void *page_address) {}
77 static inline void hibernate_restore_unprotect_page(void *page_address) {}
78 #endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
80 static int swsusp_page_is_free(struct page *);
81 static void swsusp_set_page_forbidden(struct page *);
82 static void swsusp_unset_page_forbidden(struct page *);
85 * Number of bytes to reserve for memory allocations made by device drivers
86 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
87 * cause image creation to fail (tunable via /sys/power/reserved_size).
89 unsigned long reserved_size;
91 void __init hibernate_reserved_size_init(void)
93 reserved_size = SPARE_PAGES * PAGE_SIZE;
97 * Preferred image size in bytes (tunable via /sys/power/image_size).
98 * When it is set to N, swsusp will do its best to ensure the image
99 * size will not exceed N bytes, but if that is impossible, it will
100 * try to create the smallest image possible.
102 unsigned long image_size;
104 void __init hibernate_image_size_init(void)
106 image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
110 * List of PBEs needed for restoring the pages that were allocated before
111 * the suspend and included in the suspend image, but have also been
112 * allocated by the "resume" kernel, so their contents cannot be written
113 * directly to their "original" page frames.
115 struct pbe *restore_pblist;
117 /* struct linked_page is used to build chains of pages */
119 #define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
121 struct linked_page {
122 struct linked_page *next;
123 char data[LINKED_PAGE_DATA_SIZE];
124 } __packed;
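As a worked aside (illustrative, not part of the upstream file): with 4 KiB pages on a 64-bit kernel, LINKED_PAGE_DATA_SIZE is 4096 - 8 = 4088 bytes, i.e. each page in a chain carries one next pointer plus 4088 bytes of payload. A minimal sketch of walking such a chain:

	static unsigned int count_linked_pages(struct linked_page *list)
	{
		unsigned int n = 0;

		/* Visit one page of the chain per iteration. */
		while (list) {
			n++;
			list = list->next;
		}
		return n;
	}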
127 * List of "safe" pages (ie. pages that were not used by the image kernel
128 * before hibernation) that may be used as temporary storage for image kernel
129 * memory contents.
131 static struct linked_page *safe_pages_list;
133 /* Pointer to an auxiliary buffer (1 page) */
134 static void *buffer;
136 #define PG_ANY 0
137 #define PG_SAFE 1
138 #define PG_UNSAFE_CLEAR 1
139 #define PG_UNSAFE_KEEP 0
141 static unsigned int allocated_unsafe_pages;
144 * get_image_page - Allocate a page for a hibernation image.
145 * @gfp_mask: GFP mask for the allocation.
146 * @safe_needed: Get pages that were not used before hibernation (restore only)
148 * During image restoration, for storing the PBE list and the image data, we can
149 * only use memory pages that do not conflict with the pages used before
150 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
151 * using allocated_unsafe_pages.
153 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
154 * swsusp_free() can release it.
156 static void *get_image_page(gfp_t gfp_mask, int safe_needed)
158 void *res;
160 res = (void *)get_zeroed_page(gfp_mask);
161 if (safe_needed)
162 while (res && swsusp_page_is_free(virt_to_page(res))) {
163 /* The page is unsafe, mark it for swsusp_free() */
164 swsusp_set_page_forbidden(virt_to_page(res));
165 allocated_unsafe_pages++;
166 res = (void *)get_zeroed_page(gfp_mask);
168 if (res) {
169 swsusp_set_page_forbidden(virt_to_page(res));
170 swsusp_set_page_free(virt_to_page(res));
172 return res;
175 static void *__get_safe_page(gfp_t gfp_mask)
177 if (safe_pages_list) {
178 void *ret = safe_pages_list;
180 safe_pages_list = safe_pages_list->next;
181 memset(ret, 0, PAGE_SIZE);
182 return ret;
184 return get_image_page(gfp_mask, PG_SAFE);
187 unsigned long get_safe_page(gfp_t gfp_mask)
189 return (unsigned long)__get_safe_page(gfp_mask);
192 static struct page *alloc_image_page(gfp_t gfp_mask)
194 struct page *page;
196 page = alloc_page(gfp_mask);
197 if (page) {
198 swsusp_set_page_forbidden(page);
199 swsusp_set_page_free(page);
201 return page;
204 static void recycle_safe_page(void *page_address)
206 struct linked_page *lp = page_address;
208 lp->next = safe_pages_list;
209 safe_pages_list = lp;
213 * free_image_page - Free a page allocated for hibernation image.
214 * @addr: Address of the page to free.
215 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
217 * The page to free should have been allocated by get_image_page() (page flags
218 * set by it are affected).
220 static inline void free_image_page(void *addr, int clear_nosave_free)
222 struct page *page;
224 BUG_ON(!virt_addr_valid(addr));
226 page = virt_to_page(addr);
228 swsusp_unset_page_forbidden(page);
229 if (clear_nosave_free)
230 swsusp_unset_page_free(page);
232 __free_page(page);
235 static inline void free_list_of_pages(struct linked_page *list,
236 int clear_page_nosave)
238 while (list) {
239 struct linked_page *lp = list->next;
241 free_image_page(list, clear_page_nosave);
242 list = lp;
247 * struct chain_allocator is used for allocating small objects out of
248 * a linked list of pages called 'the chain'.
250 * The chain grows whenever there is no room for a new object in
251 * the current page. The allocated objects cannot be freed individually.
252 * It is only possible to free them all at once, by freeing the entire
253 * chain.
255 * NOTE: The chain allocator may be inefficient if the allocated objects
256 * are not much smaller than PAGE_SIZE.
258 struct chain_allocator {
259 struct linked_page *chain; /* the chain */
260 unsigned int used_space; /* total size of objects allocated out
261 of the current page */
262 gfp_t gfp_mask; /* mask for allocating pages */
263 int safe_needed; /* if set, only "safe" pages are allocated */
266 static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
267 int safe_needed)
269 ca->chain = NULL;
270 ca->used_space = LINKED_PAGE_DATA_SIZE;
271 ca->gfp_mask = gfp_mask;
272 ca->safe_needed = safe_needed;
275 static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
277 void *ret;
279 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
280 struct linked_page *lp;
282 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
283 get_image_page(ca->gfp_mask, PG_ANY);
284 if (!lp)
285 return NULL;
287 lp->next = ca->chain;
288 ca->chain = lp;
289 ca->used_space = 0;
291 ret = ca->chain->data + ca->used_space;
292 ca->used_space += size;
293 return ret;
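An illustrative usage sketch of the chain allocator (not in the upstream file; it assumes a caller needing a few small struct pbe objects that are released all at once, as the comment above requires):

	static int chain_alloc_example(void)
	{
		struct chain_allocator ca;
		struct pbe *p;

		chain_init(&ca, GFP_KERNEL, PG_ANY);
		p = chain_alloc(&ca, sizeof(struct pbe));
		if (!p)
			return -ENOMEM;
		/* ... use p; individual objects cannot be freed ... */
		free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
		return 0;
	}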
297 * Data types related to memory bitmaps.
299 * Memory bitmap is a structure consisting of many linked lists of
300 * objects. The main list's elements are of type struct zone_bitmap
301 * and each of them corresponds to one zone. For each zone bitmap
302 * object there is a list of objects of type struct bm_block that
303 * represent each block of the bitmap in which information is stored.
305 * struct memory_bitmap contains a pointer to the main list of zone
306 * bitmap objects, a struct bm_position used for browsing the bitmap,
307 * and a pointer to the list of pages used for allocating all of the
308 * zone bitmap objects and bitmap block objects.
310 * NOTE: It has to be possible to lay out the bitmap in memory
311 * using only allocations of order 0. Additionally, the bitmap is
312 * designed to work with an arbitrary number of zones (this is over the
313 * top for now, but let's avoid making unnecessary assumptions ;-).
315 * struct zone_bitmap contains a pointer to a list of bitmap block
316 * objects and a pointer to the bitmap block object that has been
317 * most recently used for setting bits. Additionally, it contains the
318 * PFNs that correspond to the start and end of the represented zone.
320 * struct bm_block contains a pointer to the memory page in which
321 * information is stored (in the form of a block of bitmap)
322 * It also contains the pfns that correspond to the start and end of
323 * the represented memory area.
325 * The memory bitmap is organized as a radix tree to guarantee fast random
326 * access to the bits. There is one radix tree for each zone (as returned
327 * from create_mem_extents).
329 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
330 * two linked lists for the nodes of the tree, one for the inner nodes and
331 * one for the leaf nodes. The linked leaf nodes are used for fast linear
332 * access to the memory bitmap.
334 * The struct rtree_node represents one node of the radix tree.
337 #define BM_END_OF_MAP (~0UL)
339 #define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
340 #define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)
341 #define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
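To make these constants concrete (illustrative figures, assuming 4 KiB pages): BM_BITS_PER_BLOCK = 4096 * 8 = 32768, so one bitmap page covers 32768 page frames (128 MiB of memory), and BM_BLOCK_SHIFT = 12 + 3 = 15. For a PFN at offset off from the start of its zone, the leaf block is off >> BM_BLOCK_SHIFT and the bit within that block is off & BM_BLOCK_MASK.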
344 * struct rtree_node is a wrapper struct to link the nodes
345 * of the rtree together for easy linear iteration over
346 * bits and easy freeing
348 struct rtree_node {
349 struct list_head list;
350 unsigned long *data;
354 * struct mem_zone_bm_rtree represents a bitmap used for one
355 * populated memory zone.
357 struct mem_zone_bm_rtree {
358 struct list_head list; /* Link Zones together */
359 struct list_head nodes; /* Radix Tree inner nodes */
360 struct list_head leaves; /* Radix Tree leaves */
361 unsigned long start_pfn; /* Zone start page frame */
362 unsigned long end_pfn; /* Zone end page frame + 1 */
363 struct rtree_node *rtree; /* Radix Tree Root */
364 int levels; /* Number of Radix Tree Levels */
365 unsigned int blocks; /* Number of Bitmap Blocks */
368 /* struct bm_position is used for browsing memory bitmaps */
370 struct bm_position {
371 struct mem_zone_bm_rtree *zone;
372 struct rtree_node *node;
373 unsigned long node_pfn;
374 int node_bit;
377 struct memory_bitmap {
378 struct list_head zones;
379 struct linked_page *p_list; /* list of pages used to store zone
380 bitmap objects and bitmap block
381 objects */
382 struct bm_position cur; /* most recently used bit position */
385 /* Functions that operate on memory bitmaps */
387 #define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long))
388 #if BITS_PER_LONG == 32
389 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
390 #else
391 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
392 #endif
393 #define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
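Continuing the worked figures (illustrative, 4 KiB pages on a 64-bit kernel): BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512 and BM_RTREE_LEVEL_SHIFT = 12 - 3 = 9, so every inner level of the radix tree fans out to 512 children. A single inner level therefore addresses 512 leaf blocks, i.e. 512 * 128 MiB = 64 GiB per zone, and two levels address 32 TiB.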
396 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
398 * This function is used to allocate inner nodes as well as the
399 * leaf nodes of the radix tree. It also adds the node to the
400 * corresponding linked list passed in by the *list parameter.
402 static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
403 struct chain_allocator *ca,
404 struct list_head *list)
406 struct rtree_node *node;
408 node = chain_alloc(ca, sizeof(struct rtree_node));
409 if (!node)
410 return NULL;
412 node->data = get_image_page(gfp_mask, safe_needed);
413 if (!node->data)
414 return NULL;
416 list_add_tail(&node->list, list);
418 return node;
422 * add_rtree_block - Add a new leaf node to the radix tree.
424 * The leaf nodes need to be allocated in order to keep the leaves
425 * linked list in order. This is guaranteed by the zone->blocks
426 * counter.
428 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
429 int safe_needed, struct chain_allocator *ca)
431 struct rtree_node *node, *block, **dst;
432 unsigned int levels_needed, block_nr;
433 int i;
435 block_nr = zone->blocks;
436 levels_needed = 0;
438 /* How many levels do we need for this block nr? */
439 while (block_nr) {
440 levels_needed += 1;
441 block_nr >>= BM_RTREE_LEVEL_SHIFT;
444 /* Make sure the rtree has enough levels */
445 for (i = zone->levels; i < levels_needed; i++) {
446 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
447 &zone->nodes);
448 if (!node)
449 return -ENOMEM;
451 node->data[0] = (unsigned long)zone->rtree;
452 zone->rtree = node;
453 zone->levels += 1;
456 /* Allocate new block */
457 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
458 if (!block)
459 return -ENOMEM;
461 /* Now walk the rtree to insert the block */
462 node = zone->rtree;
463 dst = &zone->rtree;
464 block_nr = zone->blocks;
465 for (i = zone->levels; i > 0; i--) {
466 int index;
468 if (!node) {
469 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
470 &zone->nodes);
471 if (!node)
472 return -ENOMEM;
473 *dst = node;
476 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
477 index &= BM_RTREE_LEVEL_MASK;
478 dst = (struct rtree_node **)&((*dst)->data[index]);
479 node = *dst;
482 zone->blocks += 1;
483 *dst = block;
485 return 0;
488 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
489 int clear_nosave_free);
492 * create_zone_bm_rtree - Create a radix tree for one zone.
494 * Allocates the mem_zone_bm_rtree structure and initializes it.
495 * This function also allocates and builds the radix tree for the
496 * zone.
498 static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
499 int safe_needed,
500 struct chain_allocator *ca,
501 unsigned long start,
502 unsigned long end)
504 struct mem_zone_bm_rtree *zone;
505 unsigned int i, nr_blocks;
506 unsigned long pages;
508 pages = end - start;
509 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
510 if (!zone)
511 return NULL;
513 INIT_LIST_HEAD(&zone->nodes);
514 INIT_LIST_HEAD(&zone->leaves);
515 zone->start_pfn = start;
516 zone->end_pfn = end;
517 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
519 for (i = 0; i < nr_blocks; i++) {
520 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
521 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
522 return NULL;
526 return zone;
530 * free_zone_bm_rtree - Free the memory of the radix tree.
532 * Free all node pages of the radix tree. The mem_zone_bm_rtree
533 * structure itself is not freed here nor are the rtree_node
534 * structs.
536 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
537 int clear_nosave_free)
539 struct rtree_node *node;
541 list_for_each_entry(node, &zone->nodes, list)
542 free_image_page(node->data, clear_nosave_free);
544 list_for_each_entry(node, &zone->leaves, list)
545 free_image_page(node->data, clear_nosave_free);
548 static void memory_bm_position_reset(struct memory_bitmap *bm)
550 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
551 list);
552 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
553 struct rtree_node, list);
554 bm->cur.node_pfn = 0;
555 bm->cur.node_bit = 0;
558 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
560 struct mem_extent {
561 struct list_head hook;
562 unsigned long start;
563 unsigned long end;
567 * free_mem_extents - Free a list of memory extents.
568 * @list: List of extents to free.
570 static void free_mem_extents(struct list_head *list)
572 struct mem_extent *ext, *aux;
574 list_for_each_entry_safe(ext, aux, list, hook) {
575 list_del(&ext->hook);
576 kfree(ext);
581 * create_mem_extents - Create a list of memory extents.
582 * @list: List to put the extents into.
583 * @gfp_mask: Mask to use for memory allocations.
585 * The extents represent contiguous ranges of PFNs.
587 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
589 struct zone *zone;
591 INIT_LIST_HEAD(list);
593 for_each_populated_zone(zone) {
594 unsigned long zone_start, zone_end;
595 struct mem_extent *ext, *cur, *aux;
597 zone_start = zone->zone_start_pfn;
598 zone_end = zone_end_pfn(zone);
600 list_for_each_entry(ext, list, hook)
601 if (zone_start <= ext->end)
602 break;
604 if (&ext->hook == list || zone_end < ext->start) {
605 /* New extent is necessary */
606 struct mem_extent *new_ext;
608 new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
609 if (!new_ext) {
610 free_mem_extents(list);
611 return -ENOMEM;
613 new_ext->start = zone_start;
614 new_ext->end = zone_end;
615 list_add_tail(&new_ext->hook, &ext->hook);
616 continue;
619 /* Merge this zone's range of PFNs with the existing one */
620 if (zone_start < ext->start)
621 ext->start = zone_start;
622 if (zone_end > ext->end)
623 ext->end = zone_end;
625 /* More merging may be possible */
626 cur = ext;
627 list_for_each_entry_safe_continue(cur, aux, list, hook) {
628 if (zone_end < cur->start)
629 break;
630 if (zone_end < cur->end)
631 ext->end = cur->end;
632 list_del(&cur->hook);
633 kfree(cur);
637 return 0;
641 * memory_bm_create - Allocate memory for a memory bitmap.
643 static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
644 int safe_needed)
646 struct chain_allocator ca;
647 struct list_head mem_extents;
648 struct mem_extent *ext;
649 int error;
651 chain_init(&ca, gfp_mask, safe_needed);
652 INIT_LIST_HEAD(&bm->zones);
654 error = create_mem_extents(&mem_extents, gfp_mask);
655 if (error)
656 return error;
658 list_for_each_entry(ext, &mem_extents, hook) {
659 struct mem_zone_bm_rtree *zone;
661 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
662 ext->start, ext->end);
663 if (!zone) {
664 error = -ENOMEM;
665 goto Error;
667 list_add_tail(&zone->list, &bm->zones);
670 bm->p_list = ca.chain;
671 memory_bm_position_reset(bm);
672 Exit:
673 free_mem_extents(&mem_extents);
674 return error;
676 Error:
677 bm->p_list = ca.chain;
678 memory_bm_free(bm, PG_UNSAFE_CLEAR);
679 goto Exit;
683 * memory_bm_free - Free memory occupied by the memory bitmap.
684 * @bm: Memory bitmap.
686 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
688 struct mem_zone_bm_rtree *zone;
690 list_for_each_entry(zone, &bm->zones, list)
691 free_zone_bm_rtree(zone, clear_nosave_free);
693 free_list_of_pages(bm->p_list, clear_nosave_free);
695 INIT_LIST_HEAD(&bm->zones);
699 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
701 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
702 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
704 * Walk the radix tree to find the page containing the bit that represents @pfn
705 * and return the position of the bit in @addr and @bit_nr.
707 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
708 void **addr, unsigned int *bit_nr)
710 struct mem_zone_bm_rtree *curr, *zone;
711 struct rtree_node *node;
712 int i, block_nr;
714 zone = bm->cur.zone;
716 if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
717 goto zone_found;
719 zone = NULL;
721 /* Find the right zone */
722 list_for_each_entry(curr, &bm->zones, list) {
723 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
724 zone = curr;
725 break;
729 if (!zone)
730 return -EFAULT;
732 zone_found:
734 * We have found the zone. Now walk the radix tree to find the leaf node
735 * for our PFN.
737 node = bm->cur.node;
738 if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
739 goto node_found;
741 node = zone->rtree;
742 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
744 for (i = zone->levels; i > 0; i--) {
745 int index;
747 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
748 index &= BM_RTREE_LEVEL_MASK;
749 BUG_ON(node->data[index] == 0);
750 node = (struct rtree_node *)node->data[index];
753 node_found:
754 /* Update last position */
755 bm->cur.zone = zone;
756 bm->cur.node = node;
757 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
759 /* Set return values */
760 *addr = node->data;
761 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
763 return 0;
766 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
768 void *addr;
769 unsigned int bit;
770 int error;
772 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
773 BUG_ON(error);
774 set_bit(bit, addr);
777 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
779 void *addr;
780 unsigned int bit;
781 int error;
783 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
784 if (!error)
785 set_bit(bit, addr);
787 return error;
790 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
792 void *addr;
793 unsigned int bit;
794 int error;
796 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
797 BUG_ON(error);
798 clear_bit(bit, addr);
801 static void memory_bm_clear_current(struct memory_bitmap *bm)
803 int bit;
805 bit = max(bm->cur.node_bit - 1, 0);
806 clear_bit(bit, bm->cur.node->data);
809 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
811 void *addr;
812 unsigned int bit;
813 int error;
815 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
816 BUG_ON(error);
817 return test_bit(bit, addr);
820 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
822 void *addr;
823 unsigned int bit;
825 return !memory_bm_find_bit(bm, pfn, &addr, &bit);
829 * rtree_next_node - Jump to the next leaf node.
831 * Set the position to the beginning of the next node in the
832 * memory bitmap. This is either the next node in the current
833 * zone's radix tree or the first node in the radix tree of the
834 * next zone.
836 * Return true if there is a next node, false otherwise.
838 static bool rtree_next_node(struct memory_bitmap *bm)
840 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
841 bm->cur.node = list_entry(bm->cur.node->list.next,
842 struct rtree_node, list);
843 bm->cur.node_pfn += BM_BITS_PER_BLOCK;
844 bm->cur.node_bit = 0;
845 touch_softlockup_watchdog();
846 return true;
849 /* No more nodes, go to the next zone */
850 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
851 bm->cur.zone = list_entry(bm->cur.zone->list.next,
852 struct mem_zone_bm_rtree, list);
853 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
854 struct rtree_node, list);
855 bm->cur.node_pfn = 0;
856 bm->cur.node_bit = 0;
857 return true;
860 /* No more zones */
861 return false;
865 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
866 * @bm: Memory bitmap.
868 * Starting from the last returned position this function searches for the next
869 * set bit in @bm and returns the PFN represented by it. If no more bits are
870 * set, BM_END_OF_MAP is returned.
872 * It is required to run memory_bm_position_reset() before the first call to
873 * this function for the given memory bitmap.
875 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
877 unsigned long bits, pfn, pages;
878 int bit;
880 do {
881 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
882 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
883 bit = find_next_bit(bm->cur.node->data, bits,
884 bm->cur.node_bit);
885 if (bit < bits) {
886 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
887 bm->cur.node_bit = bit + 1;
888 return pfn;
890 } while (rtree_next_node(bm));
892 return BM_END_OF_MAP;
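A minimal sketch (not in the upstream file) of the iteration idiom the comment above describes; the same pattern is used by duplicate_memory_bitmap() and swsusp_free() later in this file:

	static void for_each_set_pfn_example(struct memory_bitmap *bm)
	{
		unsigned long pfn;

		/* Mandatory before the first memory_bm_next_pfn() call. */
		memory_bm_position_reset(bm);
		for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
		     pfn = memory_bm_next_pfn(bm))
			pr_debug("pfn %lu is set\n", pfn);
	}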
896 * This structure represents a range of page frames the contents of which
897 * should not be saved during hibernation.
899 struct nosave_region {
900 struct list_head list;
901 unsigned long start_pfn;
902 unsigned long end_pfn;
905 static LIST_HEAD(nosave_regions);
907 static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
909 struct rtree_node *node;
911 list_for_each_entry(node, &zone->nodes, list)
912 recycle_safe_page(node->data);
914 list_for_each_entry(node, &zone->leaves, list)
915 recycle_safe_page(node->data);
918 static void memory_bm_recycle(struct memory_bitmap *bm)
920 struct mem_zone_bm_rtree *zone;
921 struct linked_page *p_list;
923 list_for_each_entry(zone, &bm->zones, list)
924 recycle_zone_bm_rtree(zone);
926 p_list = bm->p_list;
927 while (p_list) {
928 struct linked_page *lp = p_list;
930 p_list = lp->next;
931 recycle_safe_page(lp);
936 * register_nosave_region - Register a region of unsaveable memory.
938 * Register a range of page frames the contents of which should not be saved
939 * during hibernation (to be used in the early initialization code).
941 void __init __register_nosave_region(unsigned long start_pfn,
942 unsigned long end_pfn, int use_kmalloc)
944 struct nosave_region *region;
946 if (start_pfn >= end_pfn)
947 return;
949 if (!list_empty(&nosave_regions)) {
950 /* Try to extend the previous region (they should be sorted) */
951 region = list_entry(nosave_regions.prev,
952 struct nosave_region, list);
953 if (region->end_pfn == start_pfn) {
954 region->end_pfn = end_pfn;
955 goto Report;
958 if (use_kmalloc) {
959 /* During init, this shouldn't fail */
960 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
961 BUG_ON(!region);
962 } else {
963 /* This allocation cannot fail */
964 region = memblock_alloc(sizeof(struct nosave_region),
965 SMP_CACHE_BYTES);
966 if (!region)
967 panic("%s: Failed to allocate %zu bytes\n", __func__,
968 sizeof(struct nosave_region));
970 region->start_pfn = start_pfn;
971 region->end_pfn = end_pfn;
972 list_add_tail(&region->list, &nosave_regions);
973 Report:
974 pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
975 (unsigned long long) start_pfn << PAGE_SHIFT,
976 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
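Callers normally reach this through the register_nosave_region() wrapper in include/linux/suspend.h, which passes use_kmalloc == 0. A hypothetical early-init caller might look like this sketch (the address range is made up):

	/* Keep a firmware-owned window out of the hibernation image. */
	static void __init mark_firmware_range_nosave(void)
	{
		register_nosave_region(PFN_DOWN(0x000a0000),
				       PFN_UP(0x00100000));
	}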
980 * Set bits in this map correspond to the page frames the contents of which
981 * should not be saved during the suspend.
983 static struct memory_bitmap *forbidden_pages_map;
985 /* Set bits in this map correspond to free page frames. */
986 static struct memory_bitmap *free_pages_map;
989 * Each page frame allocated for creating the image is marked by setting the
990 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
993 void swsusp_set_page_free(struct page *page)
995 if (free_pages_map)
996 memory_bm_set_bit(free_pages_map, page_to_pfn(page));
999 static int swsusp_page_is_free(struct page *page)
1001 return free_pages_map ?
1002 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
1005 void swsusp_unset_page_free(struct page *page)
1007 if (free_pages_map)
1008 memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
1011 static void swsusp_set_page_forbidden(struct page *page)
1013 if (forbidden_pages_map)
1014 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
1017 int swsusp_page_is_forbidden(struct page *page)
1019 return forbidden_pages_map ?
1020 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
1023 static void swsusp_unset_page_forbidden(struct page *page)
1025 if (forbidden_pages_map)
1026 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
1030 * mark_nosave_pages - Mark pages that should not be saved.
1031 * @bm: Memory bitmap.
1033 * Set the bits in @bm that correspond to the page frames the contents of which
1034 * should not be saved.
1036 static void mark_nosave_pages(struct memory_bitmap *bm)
1038 struct nosave_region *region;
1040 if (list_empty(&nosave_regions))
1041 return;
1043 list_for_each_entry(region, &nosave_regions, list) {
1044 unsigned long pfn;
1046 pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
1047 (unsigned long long) region->start_pfn << PAGE_SHIFT,
1048 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1049 - 1);
1051 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1052 if (pfn_valid(pfn)) {
1054 * It is safe to ignore the result of
1055 * mem_bm_set_bit_check() here, since we won't
1056 * touch the PFNs for which the error is
1057 * returned anyway.
1059 mem_bm_set_bit_check(bm, pfn);
1065 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1067 * Create bitmaps needed for marking page frames that should not be saved and
1068 * free page frames. The forbidden_pages_map and free_pages_map pointers are
1069 * only modified if everything goes well, because we don't want the bits to be
1070 * touched before both bitmaps are set up.
1072 int create_basic_memory_bitmaps(void)
1074 struct memory_bitmap *bm1, *bm2;
1075 int error = 0;
1077 if (forbidden_pages_map && free_pages_map)
1078 return 0;
1079 else
1080 BUG_ON(forbidden_pages_map || free_pages_map);
1082 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1083 if (!bm1)
1084 return -ENOMEM;
1086 error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1087 if (error)
1088 goto Free_first_object;
1090 bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1091 if (!bm2)
1092 goto Free_first_bitmap;
1094 error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1095 if (error)
1096 goto Free_second_object;
1098 forbidden_pages_map = bm1;
1099 free_pages_map = bm2;
1100 mark_nosave_pages(forbidden_pages_map);
1102 pr_debug("Basic memory bitmaps created\n");
1104 return 0;
1106 Free_second_object:
1107 kfree(bm2);
1108 Free_first_bitmap:
1109 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1110 Free_first_object:
1111 kfree(bm1);
1112 return -ENOMEM;
1116 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1118 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
1119 * auxiliary pointers are necessary so that the bitmaps themselves are not
1120 * referred to while they are being freed.
1122 void free_basic_memory_bitmaps(void)
1124 struct memory_bitmap *bm1, *bm2;
1126 if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1127 return;
1129 bm1 = forbidden_pages_map;
1130 bm2 = free_pages_map;
1131 forbidden_pages_map = NULL;
1132 free_pages_map = NULL;
1133 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1134 kfree(bm1);
1135 memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1136 kfree(bm2);
1138 pr_debug("Basic memory bitmaps freed\n");
1141 void clear_free_pages(void)
1143 #ifdef CONFIG_PAGE_POISONING_ZERO
1144 struct memory_bitmap *bm = free_pages_map;
1145 unsigned long pfn;
1147 if (WARN_ON(!(free_pages_map)))
1148 return;
1150 memory_bm_position_reset(bm);
1151 pfn = memory_bm_next_pfn(bm);
1152 while (pfn != BM_END_OF_MAP) {
1153 if (pfn_valid(pfn))
1154 clear_highpage(pfn_to_page(pfn));
1156 pfn = memory_bm_next_pfn(bm);
1158 memory_bm_position_reset(bm);
1159 pr_info("free pages cleared after restore\n");
1160 #endif /* PAGE_POISONING_ZERO */
1164 * snapshot_additional_pages - Estimate the number of extra pages needed.
1165 * @zone: Memory zone to carry out the computation for.
1167 * Estimate the number of additional pages needed for setting up the hibernation
1168 * image data structures for @zone (usually, the returned value is greater than
1169 * the exact number).
1171 unsigned int snapshot_additional_pages(struct zone *zone)
1173 unsigned int rtree, nodes;
1175 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1176 rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1177 LINKED_PAGE_DATA_SIZE);
1178 while (nodes > 1) {
1179 nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1180 rtree += nodes;
1183 return 2 * rtree;
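A worked instance of this estimate (illustrative, 4 KiB pages on a 64-bit kernel, where sizeof(struct rtree_node) == 24): for a zone spanning 262144 pages (1 GiB), rtree = nodes = DIV_ROUND_UP(262144, 32768) = 8; those node structs fit in DIV_ROUND_UP(8 * 24, 4088) = 1 chain page, giving rtree = 9; one pass of the loop yields nodes = DIV_ROUND_UP(8, 512) = 1 and rtree = 10; the result is 2 * 10 = 20 pages, the factor of two accounting for the two bitmaps (orig_bm and copy_bm) built during hibernation.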
1186 #ifdef CONFIG_HIGHMEM
1188 * count_free_highmem_pages - Compute the total number of free highmem pages.
1190 * The returned number is system-wide.
1192 static unsigned int count_free_highmem_pages(void)
1194 struct zone *zone;
1195 unsigned int cnt = 0;
1197 for_each_populated_zone(zone)
1198 if (is_highmem(zone))
1199 cnt += zone_page_state(zone, NR_FREE_PAGES);
1201 return cnt;
1205 * saveable_highmem_page - Check if a highmem page is saveable.
1207 * Determine whether a highmem page should be included in a hibernation image.
1209 * We should save the page if it isn't Nosave, NosaveFree, or Reserved,
1210 * and it isn't part of a free chunk of pages.
1212 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1214 struct page *page;
1216 if (!pfn_valid(pfn))
1217 return NULL;
1219 page = pfn_to_online_page(pfn);
1220 if (!page || page_zone(page) != zone)
1221 return NULL;
1223 BUG_ON(!PageHighMem(page));
1225 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1226 return NULL;
1228 if (PageReserved(page) || PageOffline(page))
1229 return NULL;
1231 if (page_is_guard(page))
1232 return NULL;
1234 return page;
1238 * count_highmem_pages - Compute the total number of saveable highmem pages.
1240 static unsigned int count_highmem_pages(void)
1242 struct zone *zone;
1243 unsigned int n = 0;
1245 for_each_populated_zone(zone) {
1246 unsigned long pfn, max_zone_pfn;
1248 if (!is_highmem(zone))
1249 continue;
1251 mark_free_pages(zone);
1252 max_zone_pfn = zone_end_pfn(zone);
1253 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1254 if (saveable_highmem_page(zone, pfn))
1255 n++;
1257 return n;
1259 #else
1260 static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1262 return NULL;
1264 #endif /* CONFIG_HIGHMEM */
1267 * saveable_page - Check if the given page is saveable.
1269 * Determine whether a non-highmem page should be included in a hibernation
1270 * image.
1272 * We should save the page if it isn't Nosave, and is not in the range
1273 * of pages statically defined as 'unsaveable', and it isn't part of
1274 * a free chunk of pages.
1276 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1278 struct page *page;
1280 if (!pfn_valid(pfn))
1281 return NULL;
1283 page = pfn_to_online_page(pfn);
1284 if (!page || page_zone(page) != zone)
1285 return NULL;
1287 BUG_ON(PageHighMem(page));
1289 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1290 return NULL;
1292 if (PageOffline(page))
1293 return NULL;
1295 if (PageReserved(page)
1296 && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1297 return NULL;
1299 if (page_is_guard(page))
1300 return NULL;
1302 return page;
1306 * count_data_pages - Compute the total number of saveable non-highmem pages.
1308 static unsigned int count_data_pages(void)
1310 struct zone *zone;
1311 unsigned long pfn, max_zone_pfn;
1312 unsigned int n = 0;
1314 for_each_populated_zone(zone) {
1315 if (is_highmem(zone))
1316 continue;
1318 mark_free_pages(zone);
1319 max_zone_pfn = zone_end_pfn(zone);
1320 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1321 if (saveable_page(zone, pfn))
1322 n++;
1324 return n;
1328 * This is needed because copy_page and memcpy are not usable for copying
1329 * task structs.
1331 static inline void do_copy_page(long *dst, long *src)
1333 int n;
1335 for (n = PAGE_SIZE / sizeof(long); n; n--)
1336 *dst++ = *src++;
1340 * safe_copy_page - Copy a page in a safe way.
1342 * Check if the page we are going to copy is marked as present in the kernel
1343 * page tables. This is always the case if neither CONFIG_DEBUG_PAGEALLOC nor
1344 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is set. In that case kernel_page_present()
1345 * always returns 'true'.
1347 static void safe_copy_page(void *dst, struct page *s_page)
1349 if (kernel_page_present(s_page)) {
1350 do_copy_page(dst, page_address(s_page));
1351 } else {
1352 kernel_map_pages(s_page, 1, 1);
1353 do_copy_page(dst, page_address(s_page));
1354 kernel_map_pages(s_page, 1, 0);
1358 #ifdef CONFIG_HIGHMEM
1359 static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1361 return is_highmem(zone) ?
1362 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1365 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1367 struct page *s_page, *d_page;
1368 void *src, *dst;
1370 s_page = pfn_to_page(src_pfn);
1371 d_page = pfn_to_page(dst_pfn);
1372 if (PageHighMem(s_page)) {
1373 src = kmap_atomic(s_page);
1374 dst = kmap_atomic(d_page);
1375 do_copy_page(dst, src);
1376 kunmap_atomic(dst);
1377 kunmap_atomic(src);
1378 } else {
1379 if (PageHighMem(d_page)) {
1381 * The page pointed to by src may contain some kernel
1382 * data modified by kmap_atomic()
1384 safe_copy_page(buffer, s_page);
1385 dst = kmap_atomic(d_page);
1386 copy_page(dst, buffer);
1387 kunmap_atomic(dst);
1388 } else {
1389 safe_copy_page(page_address(d_page), s_page);
1393 #else
1394 #define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
1396 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1398 safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1399 pfn_to_page(src_pfn));
1401 #endif /* CONFIG_HIGHMEM */
1403 static void copy_data_pages(struct memory_bitmap *copy_bm,
1404 struct memory_bitmap *orig_bm)
1406 struct zone *zone;
1407 unsigned long pfn;
1409 for_each_populated_zone(zone) {
1410 unsigned long max_zone_pfn;
1412 mark_free_pages(zone);
1413 max_zone_pfn = zone_end_pfn(zone);
1414 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1415 if (page_is_saveable(zone, pfn))
1416 memory_bm_set_bit(orig_bm, pfn);
1418 memory_bm_position_reset(orig_bm);
1419 memory_bm_position_reset(copy_bm);
1420 for(;;) {
1421 pfn = memory_bm_next_pfn(orig_bm);
1422 if (unlikely(pfn == BM_END_OF_MAP))
1423 break;
1424 copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1428 /* Total number of image pages */
1429 static unsigned int nr_copy_pages;
1430 /* Number of pages needed for saving the original pfns of the image pages */
1431 static unsigned int nr_meta_pages;
1433 * Numbers of normal and highmem page frames allocated for hibernation image
1434 * before suspending devices.
1436 static unsigned int alloc_normal, alloc_highmem;
1438 * Memory bitmap used for marking saveable pages (during hibernation) or
1439 * hibernation image pages (during restore)
1441 static struct memory_bitmap orig_bm;
1443 * Memory bitmap used during hibernation for marking allocated page frames that
1444 * will contain copies of saveable pages. During restore it is initially used
1445 * for marking hibernation image pages, but then the set bits from it are
1446 * duplicated in @orig_bm and it is released. On highmem systems it is next
1447 * used for marking "safe" highmem pages, but it has to be reinitialized for
1448 * this purpose.
1450 static struct memory_bitmap copy_bm;
1453 * swsusp_free - Free pages allocated for hibernation image.
1455 * Image pages are allocated before snapshot creation, so they need to be
1456 * released after resume.
1458 void swsusp_free(void)
1460 unsigned long fb_pfn, fr_pfn;
1462 if (!forbidden_pages_map || !free_pages_map)
1463 goto out;
1465 memory_bm_position_reset(forbidden_pages_map);
1466 memory_bm_position_reset(free_pages_map);
1468 loop:
1469 fr_pfn = memory_bm_next_pfn(free_pages_map);
1470 fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1473 * Find the next bit set in both bitmaps. This is guaranteed to
1474 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1476 do {
1477 if (fb_pfn < fr_pfn)
1478 fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1479 if (fr_pfn < fb_pfn)
1480 fr_pfn = memory_bm_next_pfn(free_pages_map);
1481 } while (fb_pfn != fr_pfn);
1483 if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1484 struct page *page = pfn_to_page(fr_pfn);
1486 memory_bm_clear_current(forbidden_pages_map);
1487 memory_bm_clear_current(free_pages_map);
1488 hibernate_restore_unprotect_page(page_address(page));
1489 __free_page(page);
1490 goto loop;
1493 out:
1494 nr_copy_pages = 0;
1495 nr_meta_pages = 0;
1496 restore_pblist = NULL;
1497 buffer = NULL;
1498 alloc_normal = 0;
1499 alloc_highmem = 0;
1500 hibernate_restore_protection_end();
1503 /* Helper functions used for the shrinking of memory. */
1505 #define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
1508 * preallocate_image_pages - Allocate a number of pages for hibernation image.
1509 * @nr_pages: Number of page frames to allocate.
1510 * @mask: GFP flags to use for the allocation.
1512 * Return value: Number of page frames actually allocated
1514 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1516 unsigned long nr_alloc = 0;
1518 while (nr_pages > 0) {
1519 struct page *page;
1521 page = alloc_image_page(mask);
1522 if (!page)
1523 break;
1524 memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1525 if (PageHighMem(page))
1526 alloc_highmem++;
1527 else
1528 alloc_normal++;
1529 nr_pages--;
1530 nr_alloc++;
1533 return nr_alloc;
1536 static unsigned long preallocate_image_memory(unsigned long nr_pages,
1537 unsigned long avail_normal)
1539 unsigned long alloc;
1541 if (avail_normal <= alloc_normal)
1542 return 0;
1544 alloc = avail_normal - alloc_normal;
1545 if (nr_pages < alloc)
1546 alloc = nr_pages;
1548 return preallocate_image_pages(alloc, GFP_IMAGE);
1551 #ifdef CONFIG_HIGHMEM
1552 static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1554 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1558 * __fraction - Compute (an approximation of) x * (multiplier / base).
1560 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1562 x *= multiplier;
1563 do_div(x, base);
1564 return (unsigned long)x;
1567 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1568 unsigned long highmem,
1569 unsigned long total)
1571 unsigned long alloc = __fraction(nr_pages, highmem, total);
1573 return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1575 #else /* CONFIG_HIGHMEM */
1576 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1578 return 0;
1581 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1582 unsigned long highmem,
1583 unsigned long total)
1585 return 0;
1587 #endif /* CONFIG_HIGHMEM */
1590 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1592 static unsigned long free_unnecessary_pages(void)
1594 unsigned long save, to_free_normal, to_free_highmem, free;
1596 save = count_data_pages();
1597 if (alloc_normal >= save) {
1598 to_free_normal = alloc_normal - save;
1599 save = 0;
1600 } else {
1601 to_free_normal = 0;
1602 save -= alloc_normal;
1604 save += count_highmem_pages();
1605 if (alloc_highmem >= save) {
1606 to_free_highmem = alloc_highmem - save;
1607 } else {
1608 to_free_highmem = 0;
1609 save -= alloc_highmem;
1610 if (to_free_normal > save)
1611 to_free_normal -= save;
1612 else
1613 to_free_normal = 0;
1615 free = to_free_normal + to_free_highmem;
1617 memory_bm_position_reset(&copy_bm);
1619 while (to_free_normal > 0 || to_free_highmem > 0) {
1620 unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1621 struct page *page = pfn_to_page(pfn);
1623 if (PageHighMem(page)) {
1624 if (!to_free_highmem)
1625 continue;
1626 to_free_highmem--;
1627 alloc_highmem--;
1628 } else {
1629 if (!to_free_normal)
1630 continue;
1631 to_free_normal--;
1632 alloc_normal--;
1634 memory_bm_clear_bit(&copy_bm, pfn);
1635 swsusp_unset_page_forbidden(page);
1636 swsusp_unset_page_free(page);
1637 __free_page(page);
1640 return free;
1644 * minimum_image_size - Estimate the minimum acceptable size of an image.
1645 * @saveable: Number of saveable pages in the system.
1647 * We do not want to push too hard to free too much memory, so estimate the
1648 * minimum acceptable size of a hibernation image to use as the lower limit for
1649 * preallocating memory.
1651 * We assume that the minimum image size should be proportional to
1653 * [number of saveable pages] - [number of pages that can be freed in theory]
1655 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1656 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
1658 static unsigned long minimum_image_size(unsigned long saveable)
1660 unsigned long size;
1662 size = global_node_page_state(NR_SLAB_RECLAIMABLE)
1663 + global_node_page_state(NR_ACTIVE_ANON)
1664 + global_node_page_state(NR_INACTIVE_ANON)
1665 + global_node_page_state(NR_ACTIVE_FILE)
1666 + global_node_page_state(NR_INACTIVE_FILE);
1668 return saveable <= size ? 0 : saveable - size;
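For instance (illustrative numbers): with 500000 saveable pages and the five counters above summing to 350000 pages, the estimated minimum image size is 150000 pages, roughly 586 MiB with 4 KiB pages. If the counters exceed the number of saveable pages, the function returns 0 and no lower limit is enforced.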
1672 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1674 * To create a hibernation image it is necessary to make a copy of every page
1675 * frame in use. We also need a number of page frames to be free during
1676 * hibernation for allocations made while saving the image and for device
1677 * drivers, in case they need to allocate memory from their hibernation
1678 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1679 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1680 * /sys/power/reserved_size, respectively). To make this happen, we compute the
1681 * total number of available page frames and allocate at least
1683 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1684 * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1686 * of them, which corresponds to the maximum size of a hibernation image.
1688 * If image_size is set below the number following from the above formula,
1689 * the preallocation of memory is continued until the total number of saveable
1690 * pages in the system is below the requested image size or the minimum
1691 * acceptable image size returned by minimum_image_size(), whichever is greater.
1693 int hibernate_preallocate_memory(void)
1695 struct zone *zone;
1696 unsigned long saveable, size, max_size, count, highmem, pages = 0;
1697 unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1698 ktime_t start, stop;
1699 int error;
1701 pr_info("Preallocating image memory... ");
1702 start = ktime_get();
1704 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1705 if (error)
1706 goto err_out;
1708 error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1709 if (error)
1710 goto err_out;
1712 alloc_normal = 0;
1713 alloc_highmem = 0;
1715 /* Count the number of saveable data pages. */
1716 save_highmem = count_highmem_pages();
1717 saveable = count_data_pages();
1720 * Compute the total number of page frames we can use (count) and the
1721 * number of pages needed for image metadata (size).
1723 count = saveable;
1724 saveable += save_highmem;
1725 highmem = save_highmem;
1726 size = 0;
1727 for_each_populated_zone(zone) {
1728 size += snapshot_additional_pages(zone);
1729 if (is_highmem(zone))
1730 highmem += zone_page_state(zone, NR_FREE_PAGES);
1731 else
1732 count += zone_page_state(zone, NR_FREE_PAGES);
1734 avail_normal = count;
1735 count += highmem;
1736 count -= totalreserve_pages;
1738 /* Add number of pages required for page keys (s390 only). */
1739 size += page_key_additional_pages(saveable);
1741 /* Compute the maximum number of saveable pages to leave in memory. */
1742 max_size = (count - (size + PAGES_FOR_IO)) / 2
1743 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1744 /* Compute the desired number of image pages specified by image_size. */
1745 size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1746 if (size > max_size)
1747 size = max_size;
1749 * If the desired number of image pages is at least as large as the
1750 * current number of saveable pages in memory, allocate page frames for
1751 * the image and we're done.
1753 if (size >= saveable) {
1754 pages = preallocate_image_highmem(save_highmem);
1755 pages += preallocate_image_memory(saveable - pages, avail_normal);
1756 goto out;
1759 /* Estimate the minimum size of the image. */
1760 pages = minimum_image_size(saveable);
1762 * To avoid excessive pressure on the normal zone, leave room in it to
1763 * accommodate an image of the minimum size (unless it's already too
1764 * small, in which case don't preallocate pages from it at all).
1766 if (avail_normal > pages)
1767 avail_normal -= pages;
1768 else
1769 avail_normal = 0;
1770 if (size < pages)
1771 size = min_t(unsigned long, pages, max_size);
1774 * Let the memory management subsystem know that we're going to need a
1775 * large number of page frames to allocate and make it free some memory.
1776 * NOTE: If this is not done, performance will be hurt badly in some
1777 * test cases.
1779 shrink_all_memory(saveable - size);
1782 * The number of saveable pages in memory was too high, so apply some
1783 * pressure to decrease it. First, make room for the largest possible
1784 * image and fail if that doesn't work. Next, try to decrease the size
1785 * of the image as much as indicated by 'size' using allocations from
1786 * highmem and non-highmem zones separately.
1788 pages_highmem = preallocate_image_highmem(highmem / 2);
1789 alloc = count - max_size;
1790 if (alloc > pages_highmem)
1791 alloc -= pages_highmem;
1792 else
1793 alloc = 0;
1794 pages = preallocate_image_memory(alloc, avail_normal);
1795 if (pages < alloc) {
1796 /* We have exhausted non-highmem pages, try highmem. */
1797 alloc -= pages;
1798 pages += pages_highmem;
1799 pages_highmem = preallocate_image_highmem(alloc);
1800 if (pages_highmem < alloc)
1801 goto err_out;
1802 pages += pages_highmem;
1804 * size is the desired number of saveable pages to leave in
1805 * memory, so try to preallocate (all memory - size) pages.
1807 alloc = (count - pages) - size;
1808 pages += preallocate_image_highmem(alloc);
1809 } else {
1811 * There are approximately max_size saveable pages at this point
1812 * and we want to reduce this number down to size.
1814 alloc = max_size - size;
1815 size = preallocate_highmem_fraction(alloc, highmem, count);
1816 pages_highmem += size;
1817 alloc -= size;
1818 size = preallocate_image_memory(alloc, avail_normal);
1819 pages_highmem += preallocate_image_highmem(alloc - size);
1820 pages += pages_highmem + size;
1824 * We only need as many page frames for the image as there are saveable
1825 * pages in memory, but we have allocated more. Release the excessive
1826 * ones now.
1828 pages -= free_unnecessary_pages();
1830 out:
1831 stop = ktime_get();
1832 pr_cont("done (allocated %lu pages)\n", pages);
1833 swsusp_show_speed(start, stop, pages, "Allocated");
1835 return 0;
1837 err_out:
1838 pr_cont("\n");
1839 swsusp_free();
1840 return -ENOMEM;
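To make the sizing concrete (illustrative numbers; PAGES_FOR_IO is assumed here to be 1024 pages, i.e. 4 MiB with 4 KiB pages, and reserved_size to be 1 MiB, i.e. 256 pages): with count = 1000000 usable page frames and size = 1000 metadata pages, max_size = (1000000 - (1000 + 1024)) / 2 - 2 * 256 = 498988 - 512 = 498476 pages, so roughly half of the usable page frames may stay in use as saveable pages and the rest is preallocated for the image.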
1843 #ifdef CONFIG_HIGHMEM
1845 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1847 * Compute the number of non-highmem pages that will be necessary for creating
1848 * copies of highmem pages.
1850 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1852 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1854 if (free_highmem >= nr_highmem)
1855 nr_highmem = 0;
1856 else
1857 nr_highmem -= free_highmem;
1859 return nr_highmem;
1861 #else
1862 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1863 #endif /* CONFIG_HIGHMEM */
1866 * enough_free_mem - Check if there is enough free memory for the image.
1868 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1870 struct zone *zone;
1871 unsigned int free = alloc_normal;
1873 for_each_populated_zone(zone)
1874 if (!is_highmem(zone))
1875 free += zone_page_state(zone, NR_FREE_PAGES);
1877 nr_pages += count_pages_for_highmem(nr_highmem);
1878 pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1879 nr_pages, PAGES_FOR_IO, free);
1881 return free > nr_pages + PAGES_FOR_IO;
1884 #ifdef CONFIG_HIGHMEM
1886 * get_highmem_buffer - Allocate a buffer for highmem pages.
1888 * If there are some highmem pages in the hibernation image, we may need a
1889 * buffer to copy them and/or load their data.
1891 static inline int get_highmem_buffer(int safe_needed)
1893 buffer = get_image_page(GFP_ATOMIC, safe_needed);
1894 return buffer ? 0 : -ENOMEM;
1898 * alloc_highmem_pages - Allocate some highmem pages for the image.
1900 * Try to allocate as many pages as needed, but if the number of free highmem
1901 * pages is less than that, allocate them all.
1903 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1904 unsigned int nr_highmem)
1906 unsigned int to_alloc = count_free_highmem_pages();
1908 if (to_alloc > nr_highmem)
1909 to_alloc = nr_highmem;
1911 nr_highmem -= to_alloc;
1912 while (to_alloc-- > 0) {
1913 struct page *page;
1915 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1916 memory_bm_set_bit(bm, page_to_pfn(page));
1918 return nr_highmem;
1920 #else
1921 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1923 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1924 unsigned int n) { return 0; }
1925 #endif /* CONFIG_HIGHMEM */
1928 * swsusp_alloc - Allocate memory for hibernation image.
1930 * We first try to allocate as many highmem pages as there are
1931 * saveable highmem pages in the system. If that fails, we allocate
1932 * non-highmem pages for the copies of the remaining highmem ones.
1934 * In this approach it is likely that the copies of highmem pages will
1935 * also be located in the high memory, because of the way in which
1936 * copy_data_pages() works.
1938 static int swsusp_alloc(struct memory_bitmap *copy_bm,
1939 unsigned int nr_pages, unsigned int nr_highmem)
1941 if (nr_highmem > 0) {
1942 if (get_highmem_buffer(PG_ANY))
1943 goto err_out;
1944 if (nr_highmem > alloc_highmem) {
1945 nr_highmem -= alloc_highmem;
1946 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1949 if (nr_pages > alloc_normal) {
1950 nr_pages -= alloc_normal;
1951 while (nr_pages-- > 0) {
1952 struct page *page;
1954 page = alloc_image_page(GFP_ATOMIC);
1955 if (!page)
1956 goto err_out;
1957 memory_bm_set_bit(copy_bm, page_to_pfn(page));
1961 return 0;
1963 err_out:
1964 swsusp_free();
1965 return -ENOMEM;
1968 asmlinkage __visible int swsusp_save(void)
1970 unsigned int nr_pages, nr_highmem;
1972 pr_info("Creating hibernation image:\n");
1974 drain_local_pages(NULL);
1975 nr_pages = count_data_pages();
1976 nr_highmem = count_highmem_pages();
1977 pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
1979 if (!enough_free_mem(nr_pages, nr_highmem)) {
1980 pr_err("Not enough free memory\n");
1981 return -ENOMEM;
1984 if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
1985 pr_err("Memory allocation failed\n");
1986 return -ENOMEM;
1990 * While allocating the suspend pagedir, new cold pages may appear.
1991 * Kill them.
1993 drain_local_pages(NULL);
1994 copy_data_pages(&copy_bm, &orig_bm);
1997 * End of critical section. From now on, we can write to memory,
1998 * but we should not touch the disk. This especially means we must _not_
1999 * touch swap space! Except we must write out our image of course.
2002 nr_pages += nr_highmem;
2003 nr_copy_pages = nr_pages;
2004 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
2006 pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
2008 return 0;
2011 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
2012 static int init_header_complete(struct swsusp_info *info)
2014 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2015 info->version_code = LINUX_VERSION_CODE;
2016 return 0;
2019 static char *check_image_kernel(struct swsusp_info *info)
2021 if (info->version_code != LINUX_VERSION_CODE)
2022 return "kernel version";
2023 if (strcmp(info->uts.sysname,init_utsname()->sysname))
2024 return "system type";
2025 if (strcmp(info->uts.release,init_utsname()->release))
2026 return "kernel release";
2027 if (strcmp(info->uts.version,init_utsname()->version))
2028 return "version";
2029 if (strcmp(info->uts.machine,init_utsname()->machine))
2030 return "machine";
2031 return NULL;
2033 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2035 unsigned long snapshot_get_image_size(void)
2037 return nr_copy_pages + nr_meta_pages + 1;
2040 static int init_header(struct swsusp_info *info)
2042 memset(info, 0, sizeof(struct swsusp_info));
2043 info->num_physpages = get_num_physpages();
2044 info->image_pages = nr_copy_pages;
2045 info->pages = snapshot_get_image_size();
2046 info->size = info->pages;
2047 info->size <<= PAGE_SHIFT;
2048 return init_header_complete(info);
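
Continuing the same illustrative numbers, the accounting done by snapshot_get_image_size() and init_header() works out as follows (the +1 is the swsusp_info header page itself):

/*
 * pages = nr_copy_pages + nr_meta_pages + 1 = 100000 + 196 + 1 = 100197
 * size  = pages << PAGE_SHIFT = 100197 * 4096 bytes (roughly 391 MiB)
 */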
2052 * pack_pfns - Prepare PFNs for saving.
2053 * @buf: Memory buffer to store the PFNs in.
2054 * @bm: Memory bitmap.
2056 * PFNs corresponding to set bits in @bm are stored in the area of memory
2057 * pointed to by @buf (1 page at a time).
2059 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2061 int j;
2063 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2064 buf[j] = memory_bm_next_pfn(bm);
2065 if (unlikely(buf[j] == BM_END_OF_MAP))
2066 break;
2067 /* Save page key for data page (s390 only). */
2068 page_key_read(buf + j);
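
Taken together with init_header() and snapshot_read_next() below, pack_pfns() yields the following stream layout, one page per call:

/*
 * page 0                      struct swsusp_info header
 * pages 1 .. nr_meta_pages    PFN arrays packed by pack_pfns()
 * remaining pages             data pages, in memory-bitmap order
 */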
2073 * snapshot_read_next - Get the address to read the next image page from.
2074 * @handle: Snapshot handle to be used for the reading.
2076 * On the first call, @handle should point to a zeroed snapshot_handle
2077 * structure. The structure is then populated and a pointer to it should
2078 * be passed to this function on every subsequent call.
2080 * On success, the function returns a positive number. Then, the caller
2081 * is allowed to read up to the returned number of bytes from the memory
2082 * location computed by the data_of() macro.
2084 * The function returns 0 to indicate the end of the data stream and
2085 * negative numbers are returned on errors. If that happens, the structure
2086 * pointed to by @handle is not updated and should not be used any more.
2088 int snapshot_read_next(struct snapshot_handle *handle)
2090 if (handle->cur > nr_meta_pages + nr_copy_pages)
2091 return 0;
2093 if (!buffer) {
2094 /* This makes the buffer be freed by swsusp_free() */
2095 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2096 if (!buffer)
2097 return -ENOMEM;
2099 if (!handle->cur) {
2100 int error;
2102 error = init_header((struct swsusp_info *)buffer);
2103 if (error)
2104 return error;
2105 handle->buffer = buffer;
2106 memory_bm_position_reset(&orig_bm);
2107 memory_bm_position_reset(&copy_bm);
2108 } else if (handle->cur <= nr_meta_pages) {
2109 clear_page(buffer);
2110 pack_pfns(buffer, &orig_bm);
2111 } else {
2112 struct page *page;
2114 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2115 if (PageHighMem(page)) {
2117 * Highmem pages are copied to the buffer,
2118 * because we can't return with a kmapped
2119 * highmem page (we may not be called again).
2121 void *kaddr;
2123 kaddr = kmap_atomic(page);
2124 copy_page(buffer, kaddr);
2125 kunmap_atomic(kaddr);
2126 handle->buffer = buffer;
2127 } else {
2128 handle->buffer = page_address(page);
2131 handle->cur++;
2132 return PAGE_SIZE;
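
A minimal sketch of a consumer driving this interface; transport_write_page() is a hypothetical stand-in for the real sink (in the kernel, the swap writer in kernel/power/swap.c plays this role), and data_of() is the accessor from kernel/power/power.h:

#include <linux/string.h>	/* memset() */

static int save_image_sketch(void)
{
	struct snapshot_handle snapshot;
	int ret;

	memset(&snapshot, 0, sizeof(snapshot));
	while ((ret = snapshot_read_next(&snapshot)) > 0) {
		/* write out the PAGE_SIZE bytes at data_of(snapshot) */
		ret = transport_write_page(data_of(snapshot));	/* hypothetical */
		if (ret)
			break;
	}
	return ret;	/* 0 at end of stream, negative on error */
}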
2135 static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2136 struct memory_bitmap *src)
2138 unsigned long pfn;
2140 memory_bm_position_reset(src);
2141 pfn = memory_bm_next_pfn(src);
2142 while (pfn != BM_END_OF_MAP) {
2143 memory_bm_set_bit(dst, pfn);
2144 pfn = memory_bm_next_pfn(src);
2149 * mark_unsafe_pages - Mark pages that were used before hibernation.
2151 * Mark the pages that cannot be used for storing the image during restoration,
2152 * because they conflict with the pages that had been used before hibernation.
2154 static void mark_unsafe_pages(struct memory_bitmap *bm)
2156 unsigned long pfn;
2158 /* Clear the "free"/"unsafe" bit for all PFNs */
2159 memory_bm_position_reset(free_pages_map);
2160 pfn = memory_bm_next_pfn(free_pages_map);
2161 while (pfn != BM_END_OF_MAP) {
2162 memory_bm_clear_current(free_pages_map);
2163 pfn = memory_bm_next_pfn(free_pages_map);
2166 /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2167 duplicate_memory_bitmap(free_pages_map, bm);
2169 allocated_unsafe_pages = 0;
2172 static int check_header(struct swsusp_info *info)
2174 char *reason;
2176 reason = check_image_kernel(info);
2177 if (!reason && info->num_physpages != get_num_physpages())
2178 reason = "memory size";
2179 if (reason) {
2180 pr_err("Image mismatch: %s\n", reason);
2181 return -EPERM;
2183 return 0;
2187 * load_header - Check the image header and copy the data from it.
2189 static int load_header(struct swsusp_info *info)
2191 int error;
2193 restore_pblist = NULL;
2194 error = check_header(info);
2195 if (!error) {
2196 nr_copy_pages = info->image_pages;
2197 nr_meta_pages = info->pages - info->image_pages - 1;
2199 return error;
2203 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2204 * @buf: Area of memory containing the PFNs.
2205 * @bm: Memory bitmap.
2207 * For each element of the array pointed to by @buf (1 page at a time), set the
2208 * corresponding bit in @bm.
2210 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2212 int j;
2214 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2215 if (unlikely(buf[j] == BM_END_OF_MAP))
2216 break;
2218 /* Extract and buffer page key for data page (s390 only). */
2219 page_key_memorize(buf + j);
2221 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2222 memory_bm_set_bit(bm, buf[j]);
2223 else
2224 return -EFAULT;
2227 return 0;
2230 #ifdef CONFIG_HIGHMEM
2232 * struct highmem_pbe is used for creating the list of highmem pages that
2233 * should be restored atomically during the resume from disk, because the page
2234 * frames they have occupied before the suspend are in use.
2236 struct highmem_pbe {
2237 struct page *copy_page; /* data is here now */
2238 struct page *orig_page; /* data was here before the suspend */
2239 struct highmem_pbe *next;
2243 * List of highmem PBEs needed for restoring the highmem pages that were
2244 * allocated before the suspend and included in the suspend image, but have
2245 * also been allocated by the "resume" kernel, so their contents cannot be
2246 * written directly to their "original" page frames.
2248 static struct highmem_pbe *highmem_pblist;
2251 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2252 * @bm: Memory bitmap.
2254 * The bits in @bm that correspond to image pages are assumed to be set.
2256 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2258 unsigned long pfn;
2259 unsigned int cnt = 0;
2261 memory_bm_position_reset(bm);
2262 pfn = memory_bm_next_pfn(bm);
2263 while (pfn != BM_END_OF_MAP) {
2264 if (PageHighMem(pfn_to_page(pfn)))
2265 cnt++;
2267 pfn = memory_bm_next_pfn(bm);
2269 return cnt;
2272 static unsigned int safe_highmem_pages;
2274 static struct memory_bitmap *safe_highmem_bm;
2277 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2278 * @bm: Pointer to an uninitialized memory bitmap structure.
2279 * @nr_highmem_p: Pointer to the number of highmem image pages.
2281 * Try to allocate as many highmem pages as there are highmem image pages
2282 * (@nr_highmem_p points to the variable containing the number of highmem image
2283 * pages). The pages that are "safe" (ie. will not be overwritten when the
2284 * hibernation image is restored entirely) have the corresponding bits set in
2285 * @bm (it must be uninitialized).
2287 * NOTE: This function should not be called if there are no highmem image pages.
2289 static int prepare_highmem_image(struct memory_bitmap *bm,
2290 unsigned int *nr_highmem_p)
2292 unsigned int to_alloc;
2294 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2295 return -ENOMEM;
2297 if (get_highmem_buffer(PG_SAFE))
2298 return -ENOMEM;
2300 to_alloc = count_free_highmem_pages();
2301 if (to_alloc > *nr_highmem_p)
2302 to_alloc = *nr_highmem_p;
2303 else
2304 *nr_highmem_p = to_alloc;
2306 safe_highmem_pages = 0;
2307 while (to_alloc-- > 0) {
2308 struct page *page;
2310 page = alloc_page(__GFP_HIGHMEM);
2311 if (!swsusp_page_is_free(page)) {
2312 /* The page is "safe", set its bit in the bitmap */
2313 memory_bm_set_bit(bm, page_to_pfn(page));
2314 safe_highmem_pages++;
2316 /* Mark the page as allocated */
2317 swsusp_set_page_forbidden(page);
2318 swsusp_set_page_free(page);
2320 memory_bm_position_reset(bm);
2321 safe_highmem_bm = bm;
2322 return 0;
2325 static struct page *last_highmem_page;
2328 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2330 * For a given highmem image page get a buffer that suspend_write_next() should
2331 * return to its caller to write to.
2333 * If the page is to be saved to its "original" page frame or a copy of
2334 * the page is to be made in the highmem, @buffer is returned. Otherwise,
2335 * the copy of the page is to be made in normal memory, so the address of
2336 * the copy is returned.
2338 * If @buffer is returned, the caller of suspend_write_next() will write
2339 * the page's contents to @buffer, so they will have to be copied to the
2340 * right location on the next call to suspend_write_next() and it is done
2341 * with the help of copy_last_highmem_page(). For this purpose, if
2342 * @buffer is returned, @last_highmem_page is set to the page to which
2343 * the data will have to be copied from @buffer.
2345 static void *get_highmem_page_buffer(struct page *page,
2346 struct chain_allocator *ca)
2348 struct highmem_pbe *pbe;
2349 void *kaddr;
2351 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2353 * We have allocated the "original" page frame and we can
2354 * use it directly to store the loaded page.
2356 last_highmem_page = page;
2357 return buffer;
2360 * The "original" page frame has not been allocated and we have to
2361 * use a "safe" page frame to store the loaded page.
2363 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2364 if (!pbe) {
2365 swsusp_free();
2366 return ERR_PTR(-ENOMEM);
2368 pbe->orig_page = page;
2369 if (safe_highmem_pages > 0) {
2370 struct page *tmp;
2372 /* Copy of the page will be stored in high memory */
2373 kaddr = buffer;
2374 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2375 safe_highmem_pages--;
2376 last_highmem_page = tmp;
2377 pbe->copy_page = tmp;
2378 } else {
2379 /* Copy of the page will be stored in normal memory */
2380 kaddr = safe_pages_list;
2381 safe_pages_list = safe_pages_list->next;
2382 pbe->copy_page = virt_to_page(kaddr);
2384 pbe->next = highmem_pblist;
2385 highmem_pblist = pbe;
2386 return kaddr;
2390 * copy_last_highmem_page - Copy the most recent highmem image page.
2392 * Copy the contents of a highmem image page from @buffer, where the caller
2393 * of snapshot_write_next() has stored them, to the right location
2394 * represented by @last_highmem_page.
2396 static void copy_last_highmem_page(void)
2398 if (last_highmem_page) {
2399 void *dst;
2401 dst = kmap_atomic(last_highmem_page);
2402 copy_page(dst, buffer);
2403 kunmap_atomic(dst);
2404 last_highmem_page = NULL;
2408 static inline int last_highmem_page_copied(void)
2410 return !last_highmem_page;
2413 static inline void free_highmem_data(void)
2415 if (safe_highmem_bm)
2416 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2418 if (buffer)
2419 free_image_page(buffer, PG_UNSAFE_CLEAR);
2421 #else
2422 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2424 static inline int prepare_highmem_image(struct memory_bitmap *bm,
2425 unsigned int *nr_highmem_p) { return 0; }
2427 static inline void *get_highmem_page_buffer(struct page *page,
2428 struct chain_allocator *ca)
2430 return ERR_PTR(-EINVAL);
2433 static inline void copy_last_highmem_page(void) {}
2434 static inline int last_highmem_page_copied(void) { return 1; }
2435 static inline void free_highmem_data(void) {}
2436 #endif /* CONFIG_HIGHMEM */
2438 #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
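
Illustrative sizing for this constant, assuming a 64-bit system with 4 KiB pages (struct pbe is three pointers):

/*
 * sizeof(struct pbe)     = 24
 * LINKED_PAGE_DATA_SIZE  = PAGE_SIZE - sizeof(void *) = 4088
 * PBES_PER_LINKED_PAGE   = 4088 / 24 = 170
 */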
2441 * prepare_image - Make room for loading hibernation image.
2442 * @new_bm: Uninitialized memory bitmap structure.
2443 * @bm: Memory bitmap with unsafe pages marked.
2445 * Use @bm to mark the pages that will be overwritten in the process of
2446 * restoring the system memory state from the suspend image ("unsafe" pages)
2447 * and allocate memory for the image.
2449 * The idea is to allocate a new memory bitmap first and then allocate
2450 * as many pages as needed for image data, but without specifying what those
2451 * pages will be used for just yet. Instead, we mark them all as allocated and
2452 * create a list of "safe" pages to be used later. On systems with high
2453 * memory a list of "safe" highmem pages is created too.
2455 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2457 unsigned int nr_pages, nr_highmem;
2458 struct linked_page *lp;
2459 int error;
2461 /* If there is no highmem, the buffer will not be necessary */
2462 free_image_page(buffer, PG_UNSAFE_CLEAR);
2463 buffer = NULL;
2465 nr_highmem = count_highmem_image_pages(bm);
2466 mark_unsafe_pages(bm);
2468 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2469 if (error)
2470 goto Free;
2472 duplicate_memory_bitmap(new_bm, bm);
2473 memory_bm_free(bm, PG_UNSAFE_KEEP);
2474 if (nr_highmem > 0) {
2475 error = prepare_highmem_image(bm, &nr_highmem);
2476 if (error)
2477 goto Free;
2480 * Reserve some safe pages for potential later use.
2482 * NOTE: This way we make sure there will be enough safe pages for the
2483 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2484 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2486 * Likewise, nr_copy_pages cannot be less than allocated_unsafe_pages.
2488 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2489 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2490 while (nr_pages > 0) {
2491 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2492 if (!lp) {
2493 error = -ENOMEM;
2494 goto Free;
2496 lp->next = safe_pages_list;
2497 safe_pages_list = lp;
2498 nr_pages--;
2500 /* Preallocate memory for the image */
2501 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2502 while (nr_pages > 0) {
2503 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2504 if (!lp) {
2505 error = -ENOMEM;
2506 goto Free;
2508 if (!swsusp_page_is_free(virt_to_page(lp))) {
2509 /* The page is "safe", add it to the list */
2510 lp->next = safe_pages_list;
2511 safe_pages_list = lp;
2513 /* Mark the page as allocated */
2514 swsusp_set_page_forbidden(virt_to_page(lp));
2515 swsusp_set_page_free(virt_to_page(lp));
2516 nr_pages--;
2518 return 0;
2520 Free:
2521 swsusp_free();
2522 return error;
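
A worked pass through the two allocation loops above, with illustrative numbers only:

/*
 * nr_copy_pages = 100000, nr_highmem = 0, allocated_unsafe_pages = 300:
 *   chain reserve  = DIV_ROUND_UP(99700, 170) = 587 safe linked pages
 *   preallocation  = 99700 image pages, each sorted onto the safe list
 *                    or marked forbidden+free as it is obtained
 */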
2526 * get_buffer - Get the address to store the next image data page.
2528 * Get the address that snapshot_write_next() should return to its caller to
2529 * write to.
2531 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2533 struct pbe *pbe;
2534 struct page *page;
2535 unsigned long pfn = memory_bm_next_pfn(bm);
2537 if (pfn == BM_END_OF_MAP)
2538 return ERR_PTR(-EFAULT);
2540 page = pfn_to_page(pfn);
2541 if (PageHighMem(page))
2542 return get_highmem_page_buffer(page, ca);
2544 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2546 * We have allocated the "original" page frame and we can
2547 * use it directly to store the loaded page.
2549 return page_address(page);
2552 * The "original" page frame has not been allocated and we have to
2553 * use a "safe" page frame to store the loaded page.
2555 pbe = chain_alloc(ca, sizeof(struct pbe));
2556 if (!pbe) {
2557 swsusp_free();
2558 return ERR_PTR(-ENOMEM);
2560 pbe->orig_address = page_address(page);
2561 pbe->address = safe_pages_list;
2562 safe_pages_list = safe_pages_list->next;
2563 pbe->next = restore_pblist;
2564 restore_pblist = pbe;
2565 return pbe->address;
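
Conceptually, the PBE list built here is replayed at the very end of resume; the real copy happens in arch-specific low-level code (e.g. swsusp_arch_resume()) after switching to safe page tables, but a C rendering of the idea looks like this sketch:

static void replay_pbes_sketch(void)
{
	struct pbe *p;

	/* copy each loaded page from its "safe" frame to its original one */
	for (p = restore_pblist; p; p = p->next)
		copy_page(p->orig_address, p->address);
}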
2569 * snapshot_write_next - Get the address to store the next image page.
2570 * @handle: Snapshot handle structure to guide the writing.
2572 * On the first call, @handle should point to a zeroed snapshot_handle
2573 * structure. The structure is then populated and a pointer to it should
2574 * be passed to this function on every subsequent call.
2576 * On success, the function returns a positive number. Then, the caller
2577 * is allowed to write up to the returned number of bytes to the memory
2578 * location computed by the data_of() macro.
2580 * The function returns 0 to indicate the "end of file" condition. Negative
2581 * numbers are returned on errors, in which case the structure pointed to by
2582 * @handle is not updated and should not be used any more.
2584 int snapshot_write_next(struct snapshot_handle *handle)
2586 static struct chain_allocator ca;
2587 int error = 0;
2589 /* Check if we have already loaded the entire image */
2590 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2591 return 0;
2593 handle->sync_read = 1;
2595 if (!handle->cur) {
2596 if (!buffer)
2597 /* This makes the buffer be freed by swsusp_free() */
2598 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2600 if (!buffer)
2601 return -ENOMEM;
2603 handle->buffer = buffer;
2604 } else if (handle->cur == 1) {
2605 error = load_header(buffer);
2606 if (error)
2607 return error;
2609 safe_pages_list = NULL;
2611 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2612 if (error)
2613 return error;
2615 /* Allocate buffer for page keys. */
2616 error = page_key_alloc(nr_copy_pages);
2617 if (error)
2618 return error;
2620 hibernate_restore_protection_begin();
2621 } else if (handle->cur <= nr_meta_pages + 1) {
2622 error = unpack_orig_pfns(buffer, &copy_bm);
2623 if (error)
2624 return error;
2626 if (handle->cur == nr_meta_pages + 1) {
2627 error = prepare_image(&orig_bm, &copy_bm);
2628 if (error)
2629 return error;
2631 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2632 memory_bm_position_reset(&orig_bm);
2633 restore_pblist = NULL;
2634 handle->buffer = get_buffer(&orig_bm, &ca);
2635 handle->sync_read = 0;
2636 if (IS_ERR(handle->buffer))
2637 return PTR_ERR(handle->buffer);
2639 } else {
2640 copy_last_highmem_page();
2641 /* Restore page key for data page (s390 only). */
2642 page_key_write(handle->buffer);
2643 hibernate_restore_protect_page(handle->buffer);
2644 handle->buffer = get_buffer(&orig_bm, &ca);
2645 if (IS_ERR(handle->buffer))
2646 return PTR_ERR(handle->buffer);
2647 if (handle->buffer != buffer)
2648 handle->sync_read = 0;
2650 handle->cur++;
2651 return PAGE_SIZE;
2655 * snapshot_write_finalize - Complete the loading of a hibernation image.
2657 * Must be called after the last call to snapshot_write_next() in case the last
2658 * page in the image happens to be a highmem page and its contents should be
2659 * stored in highmem. Additionally, it recycles bitmap memory that's not
2660 * necessary any more.
2662 void snapshot_write_finalize(struct snapshot_handle *handle)
2664 copy_last_highmem_page();
2665 /* Restore page key for data page (s390 only). */
2666 page_key_write(handle->buffer);
2667 page_key_free();
2668 hibernate_restore_protect_page(handle->buffer);
2669 /* Do that only if we have loaded the image entirely */
2670 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2671 memory_bm_recycle(&orig_bm);
2672 free_highmem_data();
2676 int snapshot_image_loaded(struct snapshot_handle *handle)
2678 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2679 handle->cur <= nr_meta_pages + nr_copy_pages);
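
A minimal sketch of the producer side, mirroring save_image_sketch() above; transport_read_page() is a hypothetical stand-in for the real source (the swap reader in kernel/power/swap.c plays this role):

static int load_image_sketch(void)
{
	struct snapshot_handle snapshot;
	int ret;

	memset(&snapshot, 0, sizeof(snapshot));
	for (;;) {
		ret = snapshot_write_next(&snapshot);
		if (ret <= 0)
			break;
		/* fill the buffer the snapshot layer just handed us */
		ret = transport_read_page(data_of(snapshot));	/* hypothetical */
		if (ret)
			break;
	}
	if (!ret) {
		snapshot_write_finalize(&snapshot);
		if (!snapshot_image_loaded(&snapshot))
			ret = -ENODATA;
	}
	return ret;
}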
2682 #ifdef CONFIG_HIGHMEM
2683 /* Assumes that @buf is ready and points to a "safe" page */
2684 static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2685 void *buf)
2687 void *kaddr1, *kaddr2;
2689 kaddr1 = kmap_atomic(p1);
2690 kaddr2 = kmap_atomic(p2);
2691 copy_page(buf, kaddr1);
2692 copy_page(kaddr1, kaddr2);
2693 copy_page(kaddr2, buf);
2694 kunmap_atomic(kaddr2);
2695 kunmap_atomic(kaddr1);
2699 * restore_highmem - Put highmem image pages into their original locations.
2701 * For each highmem page that was in use before hibernation and is included in
2702 * the image, and also has been allocated by the "restore" kernel, swap its
2703 * current contents with the previous (ie. "before hibernation") ones.
2705 * If the restore eventually fails, we can call this function once again and
2706 * restore the highmem state as seen by the restore kernel.
2708 int restore_highmem(void)
2710 struct highmem_pbe *pbe = highmem_pblist;
2711 void *buf;
2713 if (!pbe)
2714 return 0;
2716 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2717 if (!buf)
2718 return -ENOMEM;
2720 while (pbe) {
2721 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2722 pbe = pbe->next;
2724 free_image_page(buf, PG_UNSAFE_CLEAR);
2725 return 0;
2727 #endif /* CONFIG_HIGHMEM */