/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "power.h"
struct pbe *pagedir_nosave;
static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;

				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}
struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;
static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;

	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}
int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem\n");
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	return 0;
}
int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;

		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
static int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
/**
 *	saveable - Determine whether a page should be cloned or not.
 *	@zone:		the zone to which the page belongs
 *	@zone_pfn:	the pfn of the page, relative to the start of @zone
 *
 *	We save a page if it is Reserved and not in the range of pages
 *	statically defined as 'unsaveable', or if it is not Reserved and
 *	is not part of a free chunk of pages.
 */

static int saveable(struct zone *zone, unsigned long *zone_pfn)
{
	unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
	struct page *page;

	if (!pfn_valid(pfn))
		return 0;

	page = pfn_to_page(pfn);
	BUG_ON(PageReserved(page) && PageNosave(page));
	if (PageNosave(page))
		return 0;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return 0;
	if (PageNosaveFree(page))
		return 0;

	return 1;
}
unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			n += saveable(zone, &zone_pfn);
	}
	return n;
}
static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			if (saveable(zone, &zone_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page is not usable for copying task structs. */
				memcpy((void *)pbe->address, (void *)pbe->orig_address,
					PAGE_SIZE);
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}
/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		ClearPageNosave(virt_to_page(pblist));
		if (clear_nosave_free)
			ClearPageNosaveFree(virt_to_page(pblist));
		free_page((unsigned long)pblist);
		pblist = pbe;
	}
}
/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage)
{
	struct pbe *p;

	p = pbpage;
	pbpage += PB_PAGE_SKIP;
	do
		p->next = p + 1;
	while (++p < pbpage);
}
/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage, *p;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
			p->next = p + 1;
		p->next = NULL;
	}
}
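/*
 * Illustrative sketch (not from the original sources; it assumes the usual
 * power.h definitions, i.e. PBES_PER_PAGE == PAGE_SIZE / sizeof(struct pbe)
 * and PB_PAGE_SKIP == PBES_PER_PAGE - 1): the pagedir built by
 * alloc_pagedir() plus create_pbe_list() looks like
 *
 *	page 0:    pbe[0] -> pbe[1] -> ... -> pbe[PB_PAGE_SKIP] -> page 1
 *	page 1:    pbe[0] -> pbe[1] -> ... -> pbe[PB_PAGE_SKIP] -> page 2
 *	...
 *	last page: pbe[0] -> ... -> pbe[k] -> NULL
 *
 * so the last element of every full page carries only the link to the next
 * page, and the chain is terminated after nr_pages usable entries.
 */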
/**
 *	On resume it is necessary to trace and eventually free the unsafe
 *	pages that have been allocated, because they are needed for I/O
 *	(on x86-64 we likely will "eat" these pages once again while
 *	creating the temporary page translation tables)
 */

struct eaten_page {
	struct eaten_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct eaten_page *eaten_pages = NULL;

static void release_eaten_pages(void)
{
	struct eaten_page *p, *q;

	p = eaten_pages;
	while (p) {
		q = p->next;
		/* We don't want swsusp_free() to free this page again */
		ClearPageNosave(virt_to_page(p));
		free_page((unsigned long)p);
		p = q;
	}
	eaten_pages = NULL;
}
/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	which had been used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag.
 *
 *	Allocated but unusable (i.e. eaten) memory pages should be marked
 *	so that swsusp_free() can release them.
 */

static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	if (safe_needed)
		do {
			res = (void *)get_zeroed_page(gfp_mask);
			if (res && PageNosaveFree(virt_to_page(res))) {
				/* This is for swsusp_free() */
				SetPageNosave(virt_to_page(res));
				((struct eaten_page *)res)->next = eaten_pages;
				eaten_pages = res;
			}
		} while (res && PageNosaveFree(virt_to_page(res)));
	else
		res = (void *)get_zeroed_page(gfp_mask);
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}
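/*
 * Usage sketch (illustrative, not part of the original file): on resume,
 * code that needs scratch pages guaranteed not to collide with the image
 * can do
 *
 *	unsigned long addr = get_safe_page(GFP_ATOMIC);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *
 * alloc_image_page() keeps retrying until it gets a page whose
 * PG_nosave_free bit is clear, and the "eaten" rejects are queued so that
 * release_eaten_pages() can return them to the allocator later.
 */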
/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct_pbe elements.
 */

struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	/* FIXME: rewrite this ugly loop */
	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
			pbe = pbe->next, num += PBES_PER_PAGE) {
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
	}
	if (!pbe) { /* get_zeroed_page() failed */
		free_pagedir(pblist, 1);
		pblist = NULL;
	} else
		create_pbe_list(pblist, nr_pages);
	return pblist;
}
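/*
 * Example (a minimal sketch, not from the original file): this is the
 * pattern swsusp_alloc() and create_image() below follow to build a
 * complete image skeleton for n pages:
 *
 *	struct pbe *pblist = alloc_pagedir(n, GFP_ATOMIC, 0);
 *
 *	if (!pblist)
 *		return -ENOMEM;
 *	if (alloc_data_pages(pblist, GFP_ATOMIC, 0)) {
 *		swsusp_free();
 *		return -ENOMEM;
 *	}
 */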
/**
 *	Free pages we allocated for suspend. Suspend pages are allocated
 *	before the atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long zone_pfn;

	for_each_zone(zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	pagedir_nosave = NULL;
	buffer = NULL;
}
/**
 *	enough_free_mem - Make sure we have enough free memory to create
 *	the snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
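/*
 * Worked example (assuming 4 KB pages and a three-word struct pbe on a
 * 64-bit machine, so PBES_PER_PAGE == 170): for nr_pages == 10000 we need
 * 10000 data pages, PAGES_FOR_IO pages for the I/O path, and
 * (10000 + 169) / 170 == 59 pagedir pages, so enough_free_mem() demands
 * strictly more than 10000 + PAGES_FOR_IO + 59 free lowmem pages.
 */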
static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}
static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}
asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/* During the allocation of the suspend pagedir, new cold pages may
	 * appear. Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%u pages copied)\n", nr_pages);
	return 0;
}
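/*
 * Worked example for the nr_meta_pages computation above (assuming 4 KB
 * pages and 8-byte longs): one meta page holds PAGE_SIZE / sizeof(long)
 * == 512 original addresses, so an image of 10000 pages needs
 * (10000 * 8 + 4095) >> PAGE_SHIFT == 20 meta pages.
 */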
static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}
/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure. The structure gets updated and a pointer
 *	to it must be passed to this function on every subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot. It must not be zero.
 *
 *	On success the function returns a positive number. Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro. The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	otherwise cross a page boundary.
 *
 *	The function returns 0 to indicate the end of the data stream, and a
 *	negative number is returned on error. In such cases the structure
 *	pointed to by @handle is not updated and should not be used any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = pagedir_nosave;
	}
	if (handle->prev < handle->page) {
		if (handle->page <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = pagedir_nosave;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else {
		handle->page_offset += count;
	}
	handle->offset += count;
	return count;
}
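/*
 * Usage sketch (illustrative only; write_page_to_swap() is a hypothetical
 * sink, while data_of() and struct snapshot_handle are the real interfaces
 * this function is built around):
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_read_next(&handle, PAGE_SIZE)) > 0) {
 *		if (write_page_to_swap(data_of(handle), n))
 *			break;
 *	}
 *
 * (n == 0 means the entire image has been read, n < 0 is an error.)
 */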
/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
				ClearPageNosaveFree(pfn_to_page(zone_pfn +
					zone->zone_start_pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	return 0;
}
static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}
static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname, system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release, system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version, system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine, system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
/**
 *	load_header - check the image header and copy the data from it
 */

static int load_header(struct snapshot_handle *handle,
                       struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
		if (!pblist)
			return -ENOMEM;
		pagedir_nosave = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
                                                struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}
/**
 *	create_image - use metadata contained in the PBE list
 *	pointed to by pagedir_nosave to mark the pages that will
 *	be overwritten in the process of restoring the system
 *	memory state from the image and allocate memory for
 *	the image avoiding these pages
 */

static int create_image(struct snapshot_handle *handle)
{
	int error = 0;
	struct pbe *p, *pblist;

	p = pagedir_nosave;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p, 0);
		if (!pblist)
			error = -ENOMEM;
	}
	if (!error)
		error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
	if (!error) {
		release_eaten_pages();
		pagedir_nosave = pblist;
	} else {
		pagedir_nosave = NULL;
		handle->pbe = NULL;
		nr_copy_pages = 0;
		nr_meta_pages = 0;
	}
	return error;
}
/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure. The structure gets updated and a pointer
 *	to it must be passed to this function on every subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image. It must not be zero.
 *
 *	On success the function returns a positive number. Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro. The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	otherwise cross a page boundary.
 *
 *	The function returns 0 to indicate the "end of file" condition, and a
 *	negative number is returned on error. In such cases the structure
 *	pointed to by @handle is not updated and should not be used any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	if (handle->prev < handle->page) {
		if (!handle->prev) {
			error = load_header(handle, (struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe) {
				error = create_image(handle);
				if (error)
					return error;
				handle->pbe = pagedir_nosave;
				handle->buffer = (void *)handle->pbe->address;
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = (void *)handle->pbe->address;
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else {
		handle->page_offset += count;
	}
	handle->offset += count;
	return count;
}
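/*
 * Usage sketch (illustrative only; read_page_from_swap() is a hypothetical
 * source): the mirror image of the snapshot_read_next() loop, used on
 * resume:
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_write_next(&handle, PAGE_SIZE)) > 0) {
 *		if (read_page_from_swap(data_of(handle), n))
 *			break;
 *	}
 *	if (n >= 0 && !snapshot_image_loaded(&handle))
 *		n = -ENODATA;
 */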
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->page <= nr_meta_pages + nr_copy_pages);
}