// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
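
/*
 * Build a single-segment bio for one swap page.  map_swap_page() resolves
 * the page's swap slot to the backing block device and a page-granular
 * offset; shifting by PAGE_SHIFT - 9 converts that offset into the 512-byte
 * sector units the block layer expects.
 */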
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		struct block_device *bdev;

		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		bio_add_page(bio, page, thp_size(page), 0);
	}
	return bio;
}
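
/*
 * Completion handler for swap-out bios.  On failure the page is redirtied
 * so reclaim will try to write it again later; either way writeback is
 * ended, waking anyone waiting on the page, and the bio reference is
 * dropped.
 */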
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}
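
/*
 * Give the backing device a chance to release the swap slot as soon as the
 * page content has been read back into memory.  This mainly benefits
 * in-memory backends such as zram, as explained in the comment below.
 */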
static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;
	swp_entry_t entry;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (data_race(!(sis->flags & SWP_BLKDEV)))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (e.g. zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory.  So let's free zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page should be swapped out somewhere again if
	 * we again wish to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	entry.val = page_private(page);
	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
		unsigned long offset;

		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev,
						  offset);
	}
}
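
/*
 * Completion handler for swap-in bios.  Clearing bio->bi_private with
 * WRITE_ONCE() pairs with the READ_ONCE() polling loop in swap_readpage(),
 * which is how a synchronous reader learns that the read has completed.
 */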
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}
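
/*
 * Walk a swap file block by block with bmap() and register each
 * PAGE_SIZE-aligned, physically contiguous run of blocks as a swap extent.
 * Files with holes (unmapped blocks) cannot be used for swap.
 */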
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(page);
	if (ret) {
		set_page_dirty(page);
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}
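
/*
 * Convert a swap page's index into a 512-byte sector number on the backing
 * device.  With 4K pages, PAGE_SHIFT - 9 == 3, so page index N maps to
 * sector N * 8.
 */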
static sector_t swap_page_sector(struct page *page)
{
	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
}
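
/*
 * Account a swap-out in vmstat: PSWPOUT is counted once per base page, so a
 * THP bumps it by thp_nr_pages(), and THP_SWPOUT additionally records that
 * a huge page was written out in one piece.
 */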
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}
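
/*
 * When both memcg and blk-cgroup are enabled, associate swap-out bios with
 * the block cgroup that corresponds to the page's memory cgroup, so the
 * swap writeback I/O is charged to the right cgroup.
 */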
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
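
/*
 * Write one swap page.  There are three paths: swap files on filesystems
 * that set SWP_FS_OPS go through the filesystem's ->direct_IO, block
 * devices that support the ->rw_page fast path are tried via
 * bdev_write_page(), and everything else falls back to submitting a
 * regular write bio.
 */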
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page(), and rate-limit the
			 * messages, but do not flag PageError like the
			 * normal direct-to-bio case, as the failure could
			 * be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}
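
/*
 * Read one swap page.  Frontswap is tried first, then SWP_FS_OPS swap files
 * use the filesystem's ->readpage, SWP_SYNCHRONOUS_IO devices are tried via
 * bdev_read_page(), and the fallback is a read bio.  For synchronous reads
 * the caller polls the completion queue with blk_poll(), sleeping via
 * blk_io_schedule() when polling makes no progress, until
 * end_swap_bio_read() clears bio->bi_private.
 */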
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	blk_qc_t qc;
	struct gendisk *disk;
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	psi_memstall_enter(&pflags);

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			if (trylock_page(page)) {
				swap_slot_free_notify(page);
				unlock_page(page);
			}

			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	disk = bio->bi_disk;
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	if (synchronous) {
		bio->bi_opf |= REQ_HIPRI;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	qc = submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!blk_poll(disk->queue, qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	psi_memstall_leave(&pflags);
	return ret;
}
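
/*
 * Dirtying a swap cache page: swap files with SWP_FS_OPS must go through
 * the filesystem's ->set_page_dirty so the page is tracked for writeback;
 * block-device swap only needs the dirty flag itself.
 */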
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}