/*
 * linux/kernel/power/swsusp.c
 *
 * This file provides code to write suspend image to swap and read it back.
 *
 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2.
 *
 * I'd like to thank the following people for their work:
 *
 * Pavel Machek <pavel@ucw.cz>:
 * Modifications, defectiveness pointing, being with me at the very beginning,
 * suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17.
 *
 * Steve Doddi <dirk@loth.demon.co.uk>:
 * Support the possibility of hardware state restoring.
 *
 * Raph <grey.havens@earthling.net>:
 * Support for preserving states of network devices and virtual console
 * (including X and svgatextmode)
 *
 * Kurt Garloff <garloff@suse.de>:
 * Straightened the critical function in order to prevent compilers from
 * playing tricks with local variables.
 *
 * Andreas Mohr <a.mohr@mailto.de>
 *
 * Alex Badea <vampire@go.ro>:
 *
 * Rafael J. Wysocki <rjw@sisk.pl>
 * Added the swap map data structure and reworked the handling of swap
 *
 * More state savers are welcome. Especially for the scsi layer...
 *
 * For TODOs,FIXMEs also look in Documentation/power/swsusp.txt
 */
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/genhd.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/swap.h>
#include <linux/device.h>
#include <linux/buffer_head.h>
#include <linux/swapops.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/highmem.h>
#include <linux/bio.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size = 500 * 1024 * 1024;
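/*
 * Editorial note (not part of the original file): the default above is
 * 500 MiB (500 * 1024 * 1024 = 524288000 bytes).  The limit can be changed
 * from user space before suspending, e.g. "echo 0 > /sys/power/image_size"
 * asks swsusp to produce the smallest image it can manage.
 */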
#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void);
int save_highmem(void);
int restore_highmem(void);
#else
static int save_highmem(void) { return 0; }
static int restore_highmem(void) { return 0; }
static unsigned int count_highmem_pages(void) { return 0; }
#endif
extern char resume_file[];
#define SWSUSP_SIG	"S1SUSPEND"

static struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)];
	swp_entry_t image;
	char orig_sig[10];
	char sig[10];
} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;

static struct swsusp_info swsusp_info;
static unsigned short root_swap = 0xffff;

static int mark_swapfiles(swp_entry_t start)
{
	int error;

	rw_swap_page_sync(READ,
			swp_entry(root_swap, 0),
			virt_to_page((unsigned long)&swsusp_header));
	if (!memcmp("SWAP-SPACE", swsusp_header.sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header.sig, 10)) {
		memcpy(swsusp_header.orig_sig, swsusp_header.sig, 10);
		memcpy(swsusp_header.sig, SWSUSP_SIG, 10);
		swsusp_header.image = start;
		error = rw_swap_page_sync(WRITE,
				swp_entry(root_swap, 0),
				virt_to_page((unsigned long)&swsusp_header));
	} else {
		pr_debug("swsusp: Partition is not swap space.\n");
		error = -ENODEV;
	}
	return error;
}
/*
 * Check whether the swap device is the specified resume
 * device, irrespective of whether they are specified by
 * identical names.
 *
 * (Thus, device inode aliasing is allowed. You can say /dev/hda4
 * instead of /dev/ide/host0/bus0/target0/lun0/part4 [if using devfs]
 * and they'll be considered the same device. This is *necessary* for
 * devfs, since the resume code can only recognize the form /dev/hda4,
 * but the suspend code would see the long name.)
 */
static inline int is_resume_device(const struct swap_info_struct *swap_info)
{
	struct file *file = swap_info->swap_file;
	struct inode *inode = file->f_dentry->d_inode;

	return S_ISBLK(inode->i_mode) &&
		swsusp_resume_device == MKDEV(imajor(inode), iminor(inode));
}
static int swsusp_swap_check(void) /* This is called before saving image */
{
	int i;

	spin_lock(&swap_lock);
	for (i = 0; i < MAX_SWAPFILES; i++) {
		if (!(swap_info[i].flags & SWP_WRITEOK))
			continue;
		if (!swsusp_resume_device || is_resume_device(swap_info + i)) {
			spin_unlock(&swap_lock);
			root_swap = i;
			return 0;
		}
	}
	spin_unlock(&swap_lock);
	return -ENODEV;
}
/*
 * write_page - Write one page to a fresh swap location.
 * @addr: Address we're writing.
 * @loc:  Place to store the entry we used.
 *
 * Allocate a new swap entry and 'sync' it. Note we discard -EIO
 * errors. That is an artifact left over from swsusp. It did not
 * check the return of rw_swap_page_sync() at all, since most pages
 * written back to swap would return -EIO.
 * This is a partial improvement, since we will at least return other
 * errors, though we need to eventually fix the damn code.
 */
static int write_page(unsigned long addr, swp_entry_t *loc)
{
	swp_entry_t entry;
	int error = -ENOSPC;

	entry = get_swap_page_of_type(root_swap);
	if (swp_offset(entry)) {
		error = rw_swap_page_sync(WRITE, entry, virt_to_page(addr));
		if (!error || error == -EIO)
			*loc = entry;
	}
	return error;
}
/*
 * Swap map-handling functions
 *
 * The swap map is a data structure used for keeping track of each page
 * written to the swap. It consists of many swap_map_page structures
 * that contain each an array of MAP_PAGE_SIZE swap entries.
 * These structures are linked together with the help of either the
 * .next (in memory) or the .next_swap (in swap) member.
 *
 * The swap map is created during suspend. At that time we need to keep
 * it in memory, because we have to free all of the allocated swap
 * entries if an error occurs. The memory needed is preallocated
 * so that we know in advance if there's enough of it.
 *
 * The first swap_map_page structure is filled with the swap entries that
 * correspond to the first MAP_PAGE_SIZE data pages written to swap and
 * so on. After all of the data pages have been written, the order
 * of the swap_map_page structures in the map is reversed so that they
 * can be read from swap in the original order. This causes the data
 * pages to be loaded in exactly the same order in which they have been
 * saved.
 *
 * During resume we only need to use one swap_map_page structure
 * at a time, which means that we only need to use two memory pages for
 * reading the image - one for reading the swap_map_page structures
 * and the second for reading the data pages from swap.
 */

#define MAP_PAGE_SIZE	((PAGE_SIZE - sizeof(swp_entry_t) - sizeof(void *)) \
			/ sizeof(swp_entry_t))

struct swap_map_page {
	swp_entry_t entries[MAP_PAGE_SIZE];
	swp_entry_t next_swap;
	struct swap_map_page *next;
};
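/*
 * Editorial example (not part of the original file): MAP_PAGE_SIZE is chosen
 * so that one swap_map_page never exceeds a page.  On a 32-bit box with
 * 4 KiB pages, a 4-byte swp_entry_t and 4-byte pointers it works out to
 * (4096 - 4 - 4) / 4 = 1022 entries, so a 512 MiB image (131072 data pages)
 * needs about 129 linked swap_map_page structures.  Assuming BUILD_BUG_ON()
 * from <linux/kernel.h> is available in this tree, the invariant can be
 * checked at compile time:
 */
static inline void swap_map_page_fits_in_one_page(void)
{
	BUILD_BUG_ON(sizeof(struct swap_map_page) > PAGE_SIZE);
}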
static inline void free_swap_map(struct swap_map_page *swap_map)
{
	struct swap_map_page *swp;

	while (swap_map) {
		swp = swap_map->next;
		free_page((unsigned long)swap_map);
		swap_map = swp;
	}
}
static struct swap_map_page *alloc_swap_map(unsigned int nr_pages)
{
	struct swap_map_page *swap_map, *swp;
	unsigned int n;

	pr_debug("alloc_swap_map(): nr_pages = %d\n", nr_pages);
	swap_map = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
	swp = swap_map;
	for (n = MAP_PAGE_SIZE; n < nr_pages; n += MAP_PAGE_SIZE) {
		swp->next = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
		if (!swp->next) {
			free_swap_map(swap_map);
			return NULL;
		}
		swp = swp->next;
	}
	return swap_map;
}
/*
 * reverse_swap_map - reverse the order of pages in the swap map
 */
static inline struct swap_map_page *reverse_swap_map(struct swap_map_page *swap_map)
{
	struct swap_map_page *prev, *next;

	prev = NULL;
	while (swap_map) {
		next = swap_map->next;
		swap_map->next = prev;
		prev = swap_map;
		swap_map = next;
	}
	return prev;
}
/*
 * free_swap_map_entries - free the swap entries allocated to store
 * the swap map @swap_map (this is only called in case of an error)
 */
static inline void free_swap_map_entries(struct swap_map_page *swap_map)
{
	while (swap_map) {
		if (swap_map->next_swap.val)
			swap_free(swap_map->next_swap);
		swap_map = swap_map->next;
	}
}
/*
 * save_swap_map - save the swap map used for tracking the data pages
 */
static int save_swap_map(struct swap_map_page *swap_map, swp_entry_t *start)
{
	swp_entry_t entry = (swp_entry_t){0};
	int error;

	while (swap_map) {
		swap_map->next_swap = entry;
		if ((error = write_page((unsigned long)swap_map, &entry)))
			return error;
		swap_map = swap_map->next;
	}
	*start = entry;
	return 0;
}
/*
 * free_image_entries - free the swap entries allocated to store
 * the image data pages (this is only called in case of an error)
 */
static inline void free_image_entries(struct swap_map_page *swp)
{
	unsigned int k;

	while (swp) {
		for (k = 0; k < MAP_PAGE_SIZE; k++)
			if (swp->entries[k].val)
				swap_free(swp->entries[k]);
		swp = swp->next;
	}
}
/*
 * The swap_map_handle structure is used for handling the swap map in
 * a file-alike way.
 */
struct swap_map_handle {
	struct swap_map_page *cur;
	unsigned int k;
};

static inline void init_swap_map_handle(struct swap_map_handle *handle,
					struct swap_map_page *map)
{
	handle->cur = map;
	handle->k = 0;
}

static inline int swap_map_write_page(struct swap_map_handle *handle,
					unsigned long addr)
{
	int error;

	error = write_page(addr, handle->cur->entries + handle->k);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_SIZE) {
		handle->cur = handle->cur->next;
		handle->k = 0;
	}
	return 0;
}
/*
 * save_image_data - save the data pages pointed to by the PBEs
 * from the list @pblist using the swap map handle @handle
 * (assume there are @nr_pages data pages to save)
 */
static int save_image_data(struct pbe *pblist,
			   struct swap_map_handle *handle,
			   unsigned int nr_pages)
{
	unsigned int m;
	struct pbe *p;
	int error = 0;

	printk("Saving image data pages (%u pages) ... ", nr_pages);
	m = nr_pages / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	for_each_pbe (p, pblist) {
		error = swap_map_write_page(handle, p->address);
		if (error)
			break;
		if (!(nr_pages % m))
			printk("\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	if (!error)
		printk("\b\b\b\bdone\n");
	return error;
}
static void dump_info(void)
{
	pr_debug(" swsusp: Version: %u\n", swsusp_info.version_code);
	pr_debug(" swsusp: Num Pages: %ld\n", swsusp_info.num_physpages);
	pr_debug(" swsusp: UTS Sys: %s\n", swsusp_info.uts.sysname);
	pr_debug(" swsusp: UTS Node: %s\n", swsusp_info.uts.nodename);
	pr_debug(" swsusp: UTS Release: %s\n", swsusp_info.uts.release);
	pr_debug(" swsusp: UTS Version: %s\n", swsusp_info.uts.version);
	pr_debug(" swsusp: UTS Machine: %s\n", swsusp_info.uts.machine);
	pr_debug(" swsusp: UTS Domain: %s\n", swsusp_info.uts.domainname);
	pr_debug(" swsusp: CPUs: %d\n", swsusp_info.cpus);
	pr_debug(" swsusp: Image: %ld Pages\n", swsusp_info.image_pages);
	pr_debug(" swsusp: Total: %ld Pages\n", swsusp_info.pages);
}
static void init_header(unsigned int nr_pages)
{
	memset(&swsusp_info, 0, sizeof(swsusp_info));
	swsusp_info.version_code = LINUX_VERSION_CODE;
	swsusp_info.num_physpages = num_physpages;
	memcpy(&swsusp_info.uts, &system_utsname, sizeof(system_utsname));
	swsusp_info.cpus = num_online_cpus();
	swsusp_info.image_pages = nr_pages;
	swsusp_info.pages = nr_pages +
		((nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
}
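/*
 * Worked example (editorial note, not part of the original file): .pages
 * counts everything written to swap: the nr_pages data pages, the pages
 * holding their packed original addresses (one unsigned long per data page,
 * rounded up to whole pages), and one page for this header.  With 4 KiB
 * pages and 4-byte longs, nr_pages = 10000 gives 10000 + 10 + 1 = 10011.
 */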
/*
 * pack_orig_addresses - the .orig_address fields of the PBEs from the
 * list starting at @pbe are stored in the array @buf[] (1 page)
 */
static inline struct pbe *pack_orig_addresses(unsigned long *buf,
						struct pbe *pbe)
{
	unsigned int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	for (; j < PAGE_SIZE / sizeof(long); j++)
		buf[j] = 0;
	return pbe;
}
/*
 * save_image_metadata - save the .orig_address fields of the PBEs
 * from the list @pblist using the swap map handle @handle
 */
static int save_image_metadata(struct pbe *pblist,
				struct swap_map_handle *handle)
{
	unsigned long *buf;
	unsigned int n = 0;
	struct pbe *p;
	int error = 0;

	printk("Saving image metadata ... ");
	buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;
	p = pblist;
	while (p) {
		p = pack_orig_addresses(buf, p);
		error = swap_map_write_page(handle, (unsigned long)buf);
		if (error)
			break;
		n++;
	}
	free_page((unsigned long)buf);
	if (!error)
		printk("done (%u pages saved)\n", n);
	return error;
}
/*
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */
static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = swap_info[root_swap].pages -
		swap_info[root_swap].inuse_pages;

	pr_debug("swsusp: free swap pages: %u\n", free_swap);
	return free_swap > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
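/*
 * Worked example (editorial note, not part of the original file): for a
 * 32768-page image (128 MiB with 4 KiB pages) and a 12-byte struct pbe,
 * PBES_PER_PAGE is 341, so the last term adds roughly 97 pages of page
 * backup entries; the resume partition must then offer a bit more than
 * 32768 + 97 free swap pages on top of the PAGES_FOR_IO reserve.
 */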
/*
 * swsusp_write - Write entire image and metadata.
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we DO not want to mark
 * filesystem clean: it is not. (And it does not matter, if we resume
 * correctly, we'll mark system clean, anyway.)
 */
int swsusp_write(struct pbe *pblist, unsigned int nr_pages)
{
	struct swap_map_page *swap_map;
	struct swap_map_handle handle;
	swp_entry_t start;
	int error;

	if ((error = swsusp_swap_check())) {
		printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n");
		return error;
	}
	if (!enough_swap(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free swap\n");
		return -ENOSPC;
	}

	init_header(nr_pages);
	swap_map = alloc_swap_map(swsusp_info.pages);
	if (!swap_map)
		return -ENOMEM;
	init_swap_map_handle(&handle, swap_map);

	error = swap_map_write_page(&handle, (unsigned long)&swsusp_info);
	if (!error)
		error = save_image_metadata(pblist, &handle);
	if (!error)
		error = save_image_data(pblist, &handle, nr_pages);
	if (error)
		goto Free_image_entries;

	swap_map = reverse_swap_map(swap_map);
	error = save_swap_map(swap_map, &start);
	if (error)
		goto Free_map_entries;

	error = mark_swapfiles(start);
	if (error)
		goto Free_map_entries;

	free_swap_map(swap_map);
	return error;

Free_map_entries:
	free_swap_map_entries(swap_map);
Free_image_entries:
	free_image_entries(swap_map);
	free_swap_map(swap_map);
	return error;
}
/*
 * swsusp_shrink_memory - Try to free as much memory as needed
 *
 * ... but do not OOM-kill anyone
 *
 * Notice: all userland should be stopped before it is called, or
 * livelock is possible.
 */

#define SHRINK_BITE	10000

int swsusp_shrink_memory(void)
{
	long size, tmp;
	struct zone *zone;
	unsigned long pages = 0;
	unsigned int i = 0;
	char *p = "-\\|/";

	printk("Shrinking memory... ");
	do {
		size = 2 * count_highmem_pages();
		size += size / 50 + count_data_pages();
		size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE +
			PAGES_FOR_IO;
		tmp = size;
		for_each_zone (zone)
			if (!is_highmem(zone))
				tmp -= zone->free_pages;
		if (tmp > 0) {
			tmp = shrink_all_memory(SHRINK_BITE);
			if (!tmp)
				return -ENOMEM;
			pages += tmp;
		} else if (size > image_size / PAGE_SIZE) {
			tmp = shrink_all_memory(SHRINK_BITE);
			pages += tmp;
		}
		printk("\b%c", p[i++%4]);
	} while (tmp > 0);
	printk("\bdone (%lu pages freed)\n", pages);

	return 0;
}
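/*
 * Worked example (editorial note, not part of the original file): with no
 * highmem and 10000 saveable data pages, the target above is roughly
 * 10000 data pages + 10000 / PBES_PER_PAGE pages of page backup entries
 * + the PAGES_FOR_IO reserve.  Memory is released in SHRINK_BITE steps
 * until that many lowmem pages are free and the image also fits under
 * image_size, or until nothing more can be freed.
 */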
int swsusp_suspend(void)
{
	int error;

	if ((error = arch_prepare_suspend()))
		return error;
	/* At this point, device_suspend() has been called, but *not*
	 * device_power_down(). We *must* device_power_down() now.
	 * Otherwise, drivers for some devices (e.g. interrupt controllers)
	 * become desynchronized with the actual state of the hardware
	 * at resume time, and evil weirdness ensues.
	 */
	if ((error = device_power_down(PMSG_FREEZE))) {
		printk(KERN_ERR "Some devices failed to power down, aborting suspend\n");
		return error;
	}
	if ((error = save_highmem())) {
		printk(KERN_ERR "swsusp: Not enough free pages for highmem\n");
		goto Restore_highmem;
	}
	save_processor_state();
	if ((error = swsusp_arch_suspend()))
		printk(KERN_ERR "Error %d suspending\n", error);
	/* Restore control flow magically appears here */
	restore_processor_state();
Restore_highmem:
	restore_highmem();
	return error;
}
int swsusp_resume(void)
{
	int error;

	if (device_power_down(PMSG_FREEZE))
		printk(KERN_ERR "Some devices failed to power down, very bad\n");
	/* We'll ignore saved state, but this gets preempt count (etc) right */
	save_processor_state();
	error = swsusp_arch_resume();
	/* Code below is only ever reached in case of failure. Otherwise
	 * execution continues at place where swsusp_arch_suspend was called.
	 */
	/* The only reason why swsusp_arch_resume() can fail is memory being
	 * very tight, so we have to free it as soon as we can to avoid
	 * subsequent failures.
	 */
	restore_processor_state();
	touch_softlockup_watchdog();
	return error;
}
/*
 * mark_unsafe_pages - mark the pages that cannot be used for storing
 * the image during resume, because they conflict with the pages that
 * had been used before suspend
 */
static void mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return;

	/* Clear page flags */
	for_each_zone (zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
				ClearPageNosaveFree(pfn_to_page(zone_pfn +
						zone->zone_start_pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist)
		SetPageNosaveFree(virt_to_page(p->orig_address));
}
static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}
/*
 * Using bio to read from swap.
 * This code requires a bit more work than just using buffer heads
 * but it is the recommended way for 2.5/2.6.
 *
 * The following are to signal the beginning and end of I/O. Bios
 * finish asynchronously, while we want them to happen synchronously.
 * A simple atomic_t, and a wait loop take care of this problem.
 */
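/*
 * Editorial sketch (not part of the original file) of the handshake the
 * comment above describes: the submitter raises io_done, queues the bio
 * and spins until the completion callback clears the flag:
 *
 *	atomic_set(&io_done, 1);
 *	submit_bio(rw | (1 << BIO_RW_SYNC), bio);
 *	while (atomic_read(&io_done))
 *		msleep(10);
 *
 * end_io() below is the completion callback doing atomic_set(&io_done, 0).
 */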
static atomic_t io_done = ATOMIC_INIT(0);

static int end_io(struct bio *bio, unsigned int num, int err)
{
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		panic("I/O error reading memory image");
	atomic_set(&io_done, 0);
	return 0;
}

static struct block_device *resume_bdev;
/*
 * submit - submit BIO request.
 * @rw:		READ or WRITE.
 * @page_off:	physical offset of page.
 * @page:	page we're reading or writing.
 *
 * Straight from the textbook - allocate and initialize the bio.
 * If we're writing, make sure the page is marked as dirty.
 * Then submit it and wait.
 */
static int submit(int rw, pgoff_t page_off, void *page)
{
	int error = 0;
	struct bio *bio;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;
	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
	bio->bi_bdev = resume_bdev;
	bio->bi_end_io = end_io;

	if (bio_add_page(bio, virt_to_page(page), PAGE_SIZE, 0) < PAGE_SIZE) {
		printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
		error = -EFAULT;
		goto Done;
	}

	atomic_set(&io_done, 1);
	submit_bio(rw | (1 << BIO_RW_SYNC), bio);
	while (atomic_read(&io_done))
		msleep(10);

	bio_set_pages_dirty(bio);
Done:
	bio_put(bio);
	return error;
}
static int bio_read_page(pgoff_t page_off, void *page)
{
	return submit(READ, page_off, page);
}

static int bio_write_page(pgoff_t page_off, void *page)
{
	return submit(WRITE, page_off, page);
}
/*
 * The following functions allow us to read data using a swap map
 * in a file-alike way.
 */
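/*
 * Editorial sketch (not part of the original file) of how the reader is
 * used; it mirrors what swsusp_read() does further below:
 *
 *	struct swap_map_handle handle;
 *
 *	error = get_swap_map_reader(&handle, swsusp_header.image);
 *	if (!error)
 *		error = swap_map_read_page(&handle, &swsusp_info);
 *	...
 *	release_swap_map_reader(&handle);
 */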
static inline void release_swap_map_reader(struct swap_map_handle *handle)
{
	free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static inline int get_swap_map_reader(struct swap_map_handle *handle,
					swp_entry_t start)
{
	int error;

	if (!swp_offset(start))
		return -EINVAL;
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
	if (!handle->cur)
		return -ENOMEM;
	error = bio_read_page(swp_offset(start), handle->cur);
	if (error) {
		release_swap_map_reader(handle);
		return error;
	}
	handle->k = 0;
	return 0;
}
static inline int swap_map_read_page(struct swap_map_handle *handle, void *buf)
{
	unsigned long offset;
	int error;

	if (!handle->cur)
		return -EINVAL;
	offset = swp_offset(handle->cur->entries[handle->k]);
	if (!offset)
		return -EFAULT;
	error = bio_read_page(offset, buf);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_SIZE) {
		handle->k = 0;
		offset = swp_offset(handle->cur->next_swap);
		if (!offset)
			release_swap_map_reader(handle);
		else
			error = bio_read_page(offset, handle->cur);
	}
	return error;
}
static int check_header(void)
{
	char *reason = NULL;

	if (swsusp_info.version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (swsusp_info.num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(swsusp_info.uts.sysname, system_utsname.sysname))
		reason = "system type";
	if (strcmp(swsusp_info.uts.release, system_utsname.release))
		reason = "kernel release";
	if (strcmp(swsusp_info.uts.version, system_utsname.version))
		reason = "version";
	if (strcmp(swsusp_info.uts.machine, system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
/*
 * load_image_data - load the image data using the swap map handle
 * @handle and store them using the page backup list @pblist
 * (assume there are @nr_pages pages to load)
 */
static int load_image_data(struct pbe *pblist,
			   struct swap_map_handle *handle,
			   unsigned int nr_pages)
{
	unsigned int m;
	struct pbe *p;
	int error = 0;

	printk("Loading image data pages (%u pages) ... ", nr_pages);
	m = nr_pages / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	p = pblist;
	while (p) {
		error = swap_map_read_page(handle, (void *)p->address);
		if (error)
			break;
		if (!(nr_pages % m))
			printk("\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
		p = p->next;
	}
	if (!error)
		printk("\b\b\b\bdone\n");
	return error;
}
/*
 * unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 * the PBEs in the list starting at @pbe
 */
static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
						struct pbe *pbe)
{
	unsigned int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}
/*
 * load_image_metadata - load the image metadata using the swap map
 * handle @handle and put them into the PBEs in the list @pblist
 */
static int load_image_metadata(struct pbe *pblist, struct swap_map_handle *handle)
{
	struct pbe *p;
	unsigned long *buf;
	unsigned int n = 0;
	int error = 0;

	printk("Loading image metadata ... ");
	buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;
	p = pblist;
	while (p) {
		error = swap_map_read_page(handle, buf);
		if (error)
			break;
		p = unpack_orig_addresses(buf, p);
		n++;
	}
	free_page((unsigned long)buf);
	if (!error)
		printk("done (%u pages loaded)\n", n);
	return error;
}
int swsusp_read(struct pbe **pblist_ptr)
{
	int error;
	struct pbe *p, *pblist;
	struct swap_map_handle handle;
	unsigned int nr_pages;

	if (IS_ERR(resume_bdev)) {
		pr_debug("swsusp: block device not initialised\n");
		return PTR_ERR(resume_bdev);
	}

	error = get_swap_map_reader(&handle, swsusp_header.image);
	if (!error)
		error = swap_map_read_page(&handle, &swsusp_info);
	if (!error)
		error = check_header();
	if (!error) {
		nr_pages = swsusp_info.image_pages;
		p = alloc_pagedir(nr_pages, GFP_ATOMIC, 0);
		if (!p)
			error = -ENOMEM;
	}
	if (!error)
		error = load_image_metadata(p, &handle);
	if (!error) {
		mark_unsafe_pages(p);
		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
		if (!pblist)
			error = -ENOMEM;
		else
			copy_page_backup_list(pblist, p);
	}

	/* Allocate memory for the image and read the data from swap */
	if (!error)
		error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
	if (!error) {
		release_eaten_pages();
		error = load_image_data(pblist, &handle, nr_pages);
	}
	if (!error)
		*pblist_ptr = pblist;

	release_swap_map_reader(&handle);

	blkdev_put(resume_bdev);

	if (!error)
		pr_debug("swsusp: Reading resume file was successful\n");
	else
		pr_debug("swsusp: Error %d resuming\n", error);
	return error;
}
/*
 * swsusp_check - Check for swsusp signature in the resume device
 */
int swsusp_check(void)
{
	int error;

	resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
	if (!IS_ERR(resume_bdev)) {
		set_blocksize(resume_bdev, PAGE_SIZE);
		memset(&swsusp_header, 0, sizeof(swsusp_header));
		if ((error = bio_read_page(0, &swsusp_header)))
			return error;

		if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
			memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
			/* Reset swap signature now */
			error = bio_write_page(0, &swsusp_header);
		} else {
			error = -EINVAL;
		}
		if (error)
			blkdev_put(resume_bdev);
		else
			pr_debug("swsusp: Signature found, resuming\n");
	} else {
		error = PTR_ERR(resume_bdev);
	}

	if (error)
		pr_debug("swsusp: Error %d check for resume file\n", error);
	return error;
}
/*
 * swsusp_close - close swap device.
 */
void swsusp_close(void)
{
	if (IS_ERR(resume_bdev)) {
		pr_debug("swsusp: block device not initialised\n");
		return;
	}

	blkdev_put(resume_bdev);
}