/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
 *
 * This file is released under the GPLv2.
 */
#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"
/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 * entries. These structures are stored on the swap and linked together
 * with the help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we also only need to use one swap_map_page structure
 * at a time.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};
/**
 * The swap_map_handle structure is used for handling swap in
 * a file-like way.
 */
struct swap_map_handle {
	struct swap_map_page *cur;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
};
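
/*
 * On-disk header occupying the swap signature page of the resume device.
 * The original swap signature is kept in orig_sig so that it can be
 * restored on resume; sig holds HIBERNATE_SIG while an image is present,
 * and image points to the first swap map sector.
 */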
struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)];
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __attribute__((packed));

static struct swsusp_header *swsusp_header;
/**
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */
struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;
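
/*
 * Record an allocated swap offset in the extents tree.  Offsets adjacent
 * to an existing extent simply extend it; -EINVAL means the offset is
 * already registered.
 */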
static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = container_of(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}
/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}
/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = container_of(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}
int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}
/*
 * General things
 */
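
/* Swap type of the resume device; 0xffff means it has not been set up yet. */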
static unsigned short root_swap = 0xffff;
struct block_device *hib_resume_bdev;
/*
 * Saving part
 */
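
/*
 * Replace the swap signature on the resume device with HIBERNATE_SIG and
 * record the sector of the first swap map page together with the image
 * flags, so that the "boot" kernel can find and interpret the image.
 */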
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		error = hib_bio_write_page(swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		printk(KERN_ERR "PM: Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}
/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
			&hib_resume_bdev);
	if (res < 0)
		return res;

	root_swap = res;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
	if (res)
		return res;

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	return res;
}
/**
 * write_page - Write one page to given swap location.
 * @buf:	Address we're writing.
 * @offset:	Offset of the swap page we're writing to.
 * @bio_chain:	Link the next write BIO here.
 */
static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
{
	void *src;

	if (!offset)
		return -ENOSPC;

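	/*
	 * For asynchronous (chained) writes, copy the data into a private
	 * page first, so the caller's buffer can be reused before the I/O
	 * completes.  If no page can be allocated, fall back to writing
	 * synchronously straight from the caller's buffer.
	 */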
	if (bio_chain) {
		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
		if (src) {
			copy_page(src, buf);
		} else {
			WARN_ON_ONCE(1);
			bio_chain = NULL;	/* Go synchronous */
			src = buf;
		}
	} else {
		src = buf;
	}
	return hib_bio_write_page(offset, src, bio_chain);
}
static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}
static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			printk(KERN_ERR "PM: Cannot find swap device, try "
					"swapon -a.\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}
static int swap_write_page(struct swap_map_handle *handle, void *buf,
				struct bio **bio_chain)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, bio_chain);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
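	/*
	 * The current swap map page is full: link it to a freshly allocated
	 * map sector, write it out synchronously and start filling a new one.
	 */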
	if (handle->k >= MAP_PAGE_ENTRIES) {
		error = hib_wait_on_bio_chain(bio_chain);
		if (error)
			goto out;
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, NULL);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;
	}
 out:
	return error;
}
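
/* Write out the last, possibly partially filled, swap map page. */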
static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}
static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		flush_swap_writer(handle);
		printk(KERN_INFO "PM: S");
		error = mark_swapfiles(handle, flags);
		printk("|\n");
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}
/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
/**
 * save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;

	printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ",
		nr_to_write);
	m = nr_to_write / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &bio);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_CONT "\b\b\b\bdone\n");
	else
		printk(KERN_CONT "\n");
	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
	return ret;
}
/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;
	size_t off, unc_len, cmp_len;
	unsigned char *unc, *cmp, *wrk, *page;

	page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		return -ENOMEM;
	}

	wrk = vmalloc(LZO1X_1_MEM_COMPRESS);
	if (!wrk) {
		printk(KERN_ERR "PM: Failed to allocate LZO workspace\n");
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	unc = vmalloc(LZO_UNC_SIZE);
	if (!unc) {
		printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
		vfree(wrk);
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	cmp = vmalloc(LZO_CMP_SIZE);
	if (!cmp) {
		printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
		vfree(unc);
		vfree(wrk);
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	printk(KERN_INFO
		"PM: Compressing and saving image data (%u pages) ... ",
		nr_to_write);
	m = nr_to_write / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
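	/*
	 * Read up to LZO_UNC_PAGES image pages into the staging buffer,
	 * compress them in one go, prepend the compressed length header and
	 * write the result out one page at a time.
	 */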
	for (;;) {
		for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
			ret = snapshot_read_next(snapshot);
			if (ret < 0)
				goto out_finish;

			if (!ret)
				break;

			memcpy(unc + off, data_of(*snapshot), PAGE_SIZE);

			if (!(nr_pages % m))
				printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
			nr_pages++;
		}

		if (!off)
			break;

		unc_len = off;
		ret = lzo1x_1_compress(unc, unc_len,
		                       cmp + LZO_HEADER, &cmp_len, wrk);
		if (ret < 0) {
			printk(KERN_ERR "PM: LZO compression failed\n");
			break;
		}

		if (unlikely(!cmp_len ||
		             cmp_len > lzo1x_worst_compress(unc_len))) {
			printk(KERN_ERR "PM: Invalid LZO compressed length\n");
			ret = -1;
			break;
		}

		*(size_t *)cmp = cmp_len;

		/*
		 * Given we are writing one page at a time to disk, we copy
		 * that much from the buffer, although the last bit will likely
		 * be smaller than full page. This is OK - we saved the length
		 * of the compressed data, so any garbage at the end will be
		 * discarded when we read it.
		 */
		for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) {
			memcpy(page, cmp + off, PAGE_SIZE);

			ret = swap_write_page(handle, page, &bio);
			if (ret)
				goto out_finish;
		}
	}

out_finish:
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_CONT "\b\b\b\bdone\n");
	else
		printk(KERN_CONT "\n");
	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");

	vfree(cmp);
	vfree(unc);
	vfree(wrk);
	free_page((unsigned long)page);

	return ret;
}
/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */
static int enough_swap(unsigned int nr_pages, unsigned int flags)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("PM: Free swap pages: %u\n", free_swap);

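	/*
	 * Worst case estimate: each chunk of LZO_UNC_PAGES pages may take up
	 * to LZO_CMP_PAGES pages once compressed, while an uncompressed image
	 * needs one swap page per image page, plus PAGES_FOR_IO on top.
	 */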
	required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ?
		nr_pages : (nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1);
	return free_swap > required;
}
/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we do NOT want to mark
 * the filesystems clean: they are not. (And it does not matter; if we
 * resume correctly, we'll mark the system clean anyway.)
 */
int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		printk(KERN_ERR "PM: Cannot get swap writer\n");
		return error;
	}
	if (!enough_swap(pages, flags)) {
		printk(KERN_ERR "PM: Not enough free swap\n");
		error = -ENOSPC;
		goto out_finish;
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}
/**
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */
static void release_swap_reader(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}
static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
	if (!handle->cur)
		return -ENOMEM;

	error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL);
	if (error) {
		release_swap_reader(handle);
		return error;
	}
	handle->k = 0;
	return 0;
}
static int swap_read_page(struct swap_map_handle *handle, void *buf,
				struct bio **bio_chain)
{
	sector_t offset;
	int error;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_bio_read_page(offset, buf, bio_chain);
	if (error)
		return error;
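	/*
	 * The current map page is exhausted: wait for outstanding reads and
	 * move on to the next map page, or release the reader once the end
	 * of the chain has been reached.
	 */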
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		error = hib_wait_on_bio_chain(bio_chain);
		handle->k = 0;
		offset = handle->cur->next_swap;
		if (!offset)
			release_swap_reader(handle);
		else if (!error)
			error = hib_bio_read_page(offset, handle->cur, NULL);
	}
	return error;
}
static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}
/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_to_read pages to load)
 */
static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
	unsigned int m;
	int error = 0;
	struct timeval start;
	struct timeval stop;
	struct bio *bio;
	int err2;
	unsigned nr_pages;

	printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ",
		nr_to_read);
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	for ( ; ; ) {
		error = snapshot_write_next(snapshot);
		if (error <= 0)
			break;
		error = swap_read_page(handle, data_of(*snapshot), &bio);
		if (error)
			break;
		if (snapshot->sync_read)
			error = hib_wait_on_bio_chain(&bio);
		if (error)
			break;
		if (!(nr_pages % m))
			printk("\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!error)
		error = err2;
	if (!error) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			error = -ENODATA;
	} else
		printk("\n");
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
	return error;
}
/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
	unsigned int m;
	int error = 0;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;
	unsigned nr_pages;
	size_t i, off, unc_len, cmp_len;
	unsigned char *unc, *cmp, *page[LZO_CMP_PAGES];

	for (i = 0; i < LZO_CMP_PAGES; i++) {
		page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
		if (!page[i]) {
			printk(KERN_ERR "PM: Failed to allocate LZO page\n");

			while (i)
				free_page((unsigned long)page[--i]);

			return -ENOMEM;
		}
	}

	unc = vmalloc(LZO_UNC_SIZE);
	if (!unc) {
		printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");

		for (i = 0; i < LZO_CMP_PAGES; i++)
			free_page((unsigned long)page[i]);

		return -ENOMEM;
	}

	cmp = vmalloc(LZO_CMP_SIZE);
	if (!cmp) {
		printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");

		vfree(unc);
		for (i = 0; i < LZO_CMP_PAGES; i++)
			free_page((unsigned long)page[i]);

		return -ENOMEM;
	}

	printk(KERN_INFO
		"PM: Loading and decompressing image data (%u pages) ... ",
		nr_to_read);
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);

	error = snapshot_write_next(snapshot);
	if (error <= 0)
		goto out_finish;

	for (;;) {
		error = swap_read_page(handle, page[0], NULL); /* sync */
		if (error)
			break;
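
		/*
		 * The first page of each chunk starts with the size_t length
		 * header stored by save_image_lzo.
		 */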
		cmp_len = *(size_t *)page[0];
		if (unlikely(!cmp_len ||
		             cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) {
			printk(KERN_ERR "PM: Invalid LZO compressed length\n");
			error = -1;
			break;
		}

		for (off = PAGE_SIZE, i = 1;
		     off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
			error = swap_read_page(handle, page[i], &bio);
			if (error)
				goto out_finish;
		}

		error = hib_wait_on_bio_chain(&bio); /* need all data now */
		if (error)
			goto out_finish;

		for (off = 0, i = 0;
		     off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
			memcpy(cmp + off, page[i], PAGE_SIZE);
		}

		unc_len = LZO_UNC_SIZE;
		error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len,
		                              unc, &unc_len);
		if (error < 0) {
			printk(KERN_ERR "PM: LZO decompression failed\n");
			break;
		}

		if (unlikely(!unc_len ||
		             unc_len > LZO_UNC_SIZE ||
		             unc_len & (PAGE_SIZE - 1))) {
			printk(KERN_ERR "PM: Invalid LZO uncompressed length\n");
			error = -1;
			break;
		}

		for (off = 0; off < unc_len; off += PAGE_SIZE) {
			memcpy(data_of(*snapshot), unc + off, PAGE_SIZE);

			if (!(nr_pages % m))
				printk("\b\b\b\b%3d%%", nr_pages / m);
			nr_pages++;

			error = snapshot_write_next(snapshot);
			if (error <= 0)
				goto out_finish;
		}
	}

out_finish:
	do_gettimeofday(&stop);
	if (!error) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			error = -ENODATA;
	} else
		printk("\n");
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");

	vfree(cmp);
	vfree(unc);
	for (i = 0; i < LZO_CMP_PAGES; i++)
		free_page((unsigned long)page[i]);

	return error;
}
/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header should
 *	     be written into this memory location
 */
int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	if (!error)
		error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("PM: Image successfully loaded\n");
	else
		pr_debug("PM: Error %d resuming\n", error);
	return error;
}
/**
 * swsusp_check - Check for swsusp signature in the resume device
 */
int swsusp_check(void)
{
	int error;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ, NULL);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_bio_read_page(swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_bio_write_page(swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ);
		else
			pr_debug("PM: Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("PM: Image not found (code %d)\n", error);

	return error;
}
/**
 * swsusp_close - close swap device.
 */
void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("PM: Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}
static int swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *) __get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);