/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "power.h"

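/*
 * pagedir_nosave is the head of the chain of page backup entries (PBEs)
 * describing the image; nr_copy_pages and nr_meta_pages count its data
 * and metadata pages, and buffer is a scratch page used for transferring
 * the header and the metadata to and from user space.
 */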
struct pbe *pagedir_nosave;
static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;

#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;

				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}

struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;

static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;

	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn % 1000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}

int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem");
	drain_local_pages();
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	printk("\n");
	return 0;
}

int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;

		highmem_copy = save->next;
		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}

#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int save_highmem(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif /* CONFIG_HIGHMEM */

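/*
 * __nosave_begin and __nosave_end are linker-script symbols that bracket
 * the kernel's nosave data section; page frames in that range must not
 * be saved into (or restored from) the image.
 */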
static int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

/**
 *	saveable - Determine whether a page should be cloned or not.
 *
 *	We save a page if it's Reserved, and not in the range of pages
 *	statically defined as 'unsaveable', or if it isn't reserved, and
 *	isn't part of a free chunk of pages.
 */

static int saveable(struct zone *zone, unsigned long *zone_pfn)
{
	unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
	struct page *page;

	if (!pfn_valid(pfn))
		return 0;

	page = pfn_to_page(pfn);
	BUG_ON(PageReserved(page) && PageNosave(page));
	if (PageNosave(page))
		return 0;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return 0;
	if (PageNosaveFree(page))
		return 0;

	return 1;
}

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			n += saveable(zone, &zone_pfn);
	}
	return n;
}

static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			if (saveable(zone, &zone_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page is not usable for copying task structs. */
				memcpy((void *)pbe->address,
				       (void *)pbe->orig_address, PAGE_SIZE);
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}

/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		ClearPageNosave(virt_to_page(pblist));
		if (clear_nosave_free)
			ClearPageNosaveFree(virt_to_page(pblist));
		free_page((unsigned long)pblist);
		pblist = pbe;
	}
}

/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage)
{
	struct pbe *p;

	p = pbpage;
	pbpage += PB_PAGE_SKIP;
	do
		p->next = p + 1;
	while (++p < pbpage);
}

/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage, *p;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
			p->next = p + 1;
		p->next = NULL;
	}
}

static unsigned int unsafe_pages;

/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *	and we count them using unsafe_pages.
 */

static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}

/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct_pbe elements.
 */
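
/*
 * Layout sketch of one page of the chain (PBES_PER_PAGE entries):
 *
 *	+--------+--------+-----+-----------------------------+
 *	| pbe[0] | pbe[1] | ... | pbe[PB_PAGE_SKIP]: .next ----+--> next page
 *	+--------+--------+-----+-----------------------------+
 *
 * The last element of each page links the pages together;
 * create_pbe_list() then strings the remaining elements into a single
 * list of nr_pages entries.
 */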

static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
				 int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	/* FIXME: rewrite this ugly loop */
	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
			pbe = pbe->next, num += PBES_PER_PAGE) {
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
	}
	if (!pbe) { /* get_zeroed_page() failed */
		free_pagedir(pblist, 1);
		pblist = NULL;
	} else
		create_pbe_list(pblist, nr_pages);
	return pblist;
}

/**
 *	Free pages we allocated for suspend. Suspend pages are allocated
 *	before the atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long zone_pfn;

	for_each_zone(zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	pagedir_nosave = NULL;
	buffer = NULL;
}

/**
 *	enough_free_mem - Make sure we have enough free memory to snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
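	/*
	 * We need nr_pages frames for the image data, PAGES_FOR_IO frames
	 * for writing the image out, plus one PBE per image page, of which
	 * PBES_PER_PAGE fit on a single pagedir page.
	 */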
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}

static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}

static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section:\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. In particular, we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
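	/*
	 * The metadata stores one long (the original address) per image
	 * page, so round the total up to whole pages.
	 */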
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}

static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
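	/* The image consists of the data pages, the metadata pages holding
	 * the original addresses, and one header page (this one). */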
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}

/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}

/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of data stream condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */
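
/*
 * Sketch of a typical caller loop, for illustration only: write_out()
 * is a placeholder for whatever consumes the data, and error handling
 * is elided.
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *		write_out(data_of(handle), n);
 */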

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = pagedir_nosave;
	}
	if (handle->prev < handle->page) {
		if (handle->page <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = pagedir_nosave;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else {
		handle->page_offset += count;
	}
	handle->offset += count;
	return count;
}

/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
				ClearPageNosaveFree(pfn_to_page(zone_pfn +
					zone->zone_start_pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	unsafe_pages = 0;

	return 0;
}

static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}

static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname, system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release, system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version, system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine, system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 *	load_header - check the image header and copy the data from it
 */

static int load_header(struct snapshot_handle *handle,
                       struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
		if (!pblist)
			return -ENOMEM;
		pagedir_nosave = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
						struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}

/**
 *	prepare_image - use the metadata contained in the PBE list
 *	pointed to by pagedir_nosave to mark the pages that will
 *	be overwritten in the process of restoring the system
 *	memory state from the image ("unsafe" pages) and allocate
 *	memory for the image.
 *
 *	The idea is to allocate the PBE list first and then
 *	allocate as many pages as needed for the image data,
 *	but not to assign these pages to the PBEs initially.
 *	Instead, we just mark them as allocated and create a list
 *	of "safe" pages, which will be used later.
 */

struct safe_page {
	struct safe_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct safe_page *safe_pages;
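
/*
 * The padding blows each struct safe_page up to exactly PAGE_SIZE, so
 * every entry on the safe_pages list is a whole page frame that
 * get_buffer() can later hand out for storing image data.
 */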

static int prepare_image(struct snapshot_handle *handle)
{
	int error = 0;
	unsigned int nr_pages = nr_copy_pages;
	struct pbe *p, *pblist = NULL;

	p = pagedir_nosave;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p, 0);
		if (!pblist)
			error = -ENOMEM;
	}
	safe_pages = NULL;
	if (!error && nr_pages > unsafe_pages) {
		nr_pages -= unsafe_pages;
		while (nr_pages--) {
			struct safe_page *ptr;

			ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
			if (!ptr) {
				error = -ENOMEM;
				break;
			}
			if (!PageNosaveFree(virt_to_page(ptr))) {
				/* The page is "safe", add it to the list */
				ptr->next = safe_pages;
				safe_pages = ptr;
			}
			/* Mark the page as allocated */
			SetPageNosave(virt_to_page(ptr));
			SetPageNosaveFree(virt_to_page(ptr));
		}
	}
	if (!error) {
		pagedir_nosave = pblist;
	} else {
		handle->pbe = NULL;
		swsusp_free();
	}
	return error;
}

static void *get_buffer(struct snapshot_handle *handle)
{
	struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
	struct page *page = virt_to_page(pbe->orig_address);

	if (PageNosave(page) && PageNosaveFree(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the read page
		 */
		pbe->address = 0;
		if (last && last->next)
			last->next = NULL;
		return (void *)pbe->orig_address;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the read page
	 */
	pbe->address = (unsigned long)safe_pages;
	safe_pages = safe_pages->next;
	if (last)
		last->next = pbe;
	handle->last_pbe = pbe;
	return (void *)pbe->address;
}

/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */
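
/*
 * Sketch of a typical caller loop, for illustration only: read_in() is
 * a placeholder for whatever produces the image data (error handling
 * elided); snapshot_image_loaded() below tells whether the whole image
 * was transferred.
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_write_next(&handle, PAGE_SIZE)) > 0)
 *		read_in(data_of(handle), n);
 *	if (n == 0 && !snapshot_image_loaded(&handle))
 *		...	// the image is incomplete
 */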

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	if (handle->prev < handle->page) {
		if (!handle->prev) {
			error = load_header(handle, (struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe) {
				error = prepare_image(handle);
				if (error)
					return error;
				handle->pbe = pagedir_nosave;
				handle->last_pbe = NULL;
				handle->buffer = get_buffer(handle);
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = get_buffer(handle);
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else {
		handle->page_offset += count;
	}
	handle->offset += count;
	return count;
}
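
/**
 *	snapshot_image_loaded - check that the complete image has been
 *	written: all metadata and data pages consumed and the PBE list
 *	fully traversed.
 */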
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->page <= nr_meta_pages + nr_copy_pages);
}