/*
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <asm/pgtable.h>
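
/*
 * Allocate a single-page bio aimed at the swap slot that backs @page.
 * map_swap_page() resolves the swap entry to the backing block device and
 * an offset in PAGE_SIZE units; shifting by PAGE_SHIFT - 9 converts that
 * offset to 512-byte sectors for the bio.
 */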
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		bio_add_page(bio, page, PAGE_SIZE, 0);
		BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE);
	}
	return bio;
}
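
/*
 * Completion handler for swap write bios.  It may be called in interrupt
 * context, so it only updates page flags and ends writeback; on error the
 * page is left dirty so that reclaim will try to write it out again.
 */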
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
			 imajor(bio->bi_bdev->bd_inode),
			 iminor(bio->bi_bdev->bd_inode),
			 (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}
static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (!(sis->flags & SWP_BLKDEV))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (eg zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory.  So let's free zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page should be swapped out somewhere again if
	 * we again wish to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	if (disk->fops->swap_slot_free_notify) {
		swp_entry_t entry;
		unsigned long offset;

		entry.val = page_private(page);
		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev,
						  offset);
	}
}
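
/*
 * Completion handler for swap read bios.  bi_private carries the task that
 * submitted the read; clearing it and waking that task tells a polling
 * swap_readpage() that the page is now up to date (or has failed).
 */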
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
			 imajor(bio->bi_bdev->bd_inode),
			 iminor(bio->bi_bdev->bd_inode),
			 (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	wake_up_process(waiter);
}
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}
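
/*
 * Convert the page's swap offset (in PAGE_SIZE units) into a 512-byte
 * sector number on the swap block device.  With 4K pages the shift is
 * PAGE_SHIFT - 9 = 3, e.g. swap offset 10 corresponds to sector 80.
 */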
static sector_t swap_page_sector(struct page *page)
{
	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
}
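
/*
 * Write one swap-cache page.  Three paths are possible:
 *  - SWP_FILE swap (e.g. swap over NFS): hand the page to the filesystem
 *    via ->direct_IO on the swapfile's address_space;
 *  - bdev_write_page(): the block driver's ->rw_page fast path, if any;
 *  - otherwise a regular bio, completed by @end_write_func.
 */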
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (sis->flags & SWP_FILE) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len  = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty, avoid
			 * rotate_reclaimable_page() and rate-limit the
			 * messages, but do not flag PageError like
			 * the normal direct-to-bio case, as the failure
			 * could be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_vm_event(PSWPOUT);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
out:
	return ret;
}
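
/*
 * Read one page back in from swap.  frontswap may satisfy the read from
 * memory; SWP_FILE swap goes through the filesystem's ->readpage; block
 * device swap first tries the ->rw_page fast path and otherwise submits a
 * bio.  When @do_poll is set the caller polls the block queue for
 * completion, which end_swap_bio_read() signals by clearing bi_private.
 */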
int swap_readpage(struct page *page, bool do_poll)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	blk_qc_t qc;
	struct block_device *bdev;

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);
	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (sis->flags & SWP_FILE) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
	if (!ret) {
		if (trylock_page(page)) {
			swap_slot_free_notify(page);
			unlock_page(page);
		}

		count_vm_event(PSWPIN);
		goto out;
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	bdev = bio->bi_bdev;
	bio->bi_private = current;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	count_vm_event(PSWPIN);
	bio_get(bio);
	qc = submit_bio(bio);
	while (do_poll) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!blk_mq_poll(bdev_get_queue(bdev), qc))
			break;
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	return ret;
}
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FILE) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}