// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"
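/*
 * Write-side bio completion. The __end_swap_bio_write() helper is shared
 * by the synchronous bdev path, which calls it directly after
 * submit_bio_wait() on an on-stack bio, and by end_swap_bio_write(), the
 * ->bi_end_io of the async path, which must additionally drop the bio
 * reference with bio_put().
 */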
static void __end_swap_bio_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		folio_mark_dirty(folio);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		folio_clear_reclaim(folio);
	}
	folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}
static void __end_swap_bio_read(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		folio_mark_uptodate(folio);
	}
	folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}
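/*
 * Example: with 1KiB filesystem blocks (blkbits == 10) and 4KiB pages,
 * blocks_per_page == 4. A probe at block N succeeds only if bmap()
 * resolves blocks N..N+3 to a physically contiguous, page-aligned run,
 * which is then recorded as a single-page swap extent.
 */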
int generic_swapfile_activate(struct swap_info_struct *sis,
			      struct file *swap_file,
			      sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree. This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
	       page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
		     block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}
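/*
 * Returns true only if every word of every page in the folio is zero.
 * swap_writepage() uses this to elide the write entirely for zero-filled
 * folios and record them in the zeromap instead.
 */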
static bool is_folio_zero_filled(struct folio *folio)
{
	unsigned int pos, last_pos;
	unsigned long *data;
	unsigned int i;

	last_pos = PAGE_SIZE / sizeof(*data) - 1;
	for (i = 0; i < folio_nr_pages(folio); i++) {
		data = kmap_local_folio(folio, i * PAGE_SIZE);
		/*
		 * Check last word first, in case the page is zero-filled at
		 * the start and has non-zero data at the end, which is common
		 * in real-world workloads.
		 */
		if (data[last_pos]) {
			kunmap_local(data);
			return false;
		}
		for (pos = 0; pos < last_pos; pos++) {
			if (data[pos]) {
				kunmap_local(data);
				return false;
			}
		}
		kunmap_local(data);
	}

	return true;
}
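/*
 * Mark every swap entry covered by this folio in sis->zeromap, so a later
 * swap-in can synthesize the zero-filled contents without touching the
 * device. The caller holds the locked swapcache folio, which serializes
 * updates to these bits.
 */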
static void swap_zeromap_folio_set(struct folio *folio)
{
	struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	int nr_pages = folio_nr_pages(folio);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		set_bit(swp_offset(entry), sis->zeromap);
	}

	count_vm_events(SWPOUT_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}
}
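/*
 * Clear any stale zeromap bits for this folio's swap entries before a
 * non-zero write, so a subsequent swap-in doesn't wrongly see zeros left
 * over from a previous occupant of the same entries.
 */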
static void swap_zeromap_folio_clear(struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		clear_bit(swp_offset(entry), sis->zeromap);
	}
}
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(folio);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}

	/*
	 * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
	 * The bits in zeromap are protected by the locked swapcache folio
	 * and atomic updates are used to protect against read-modify-write
	 * corruption due to other zero swap entries seeing concurrent updates.
	 */
	if (is_folio_zero_filled(folio)) {
		swap_zeromap_folio_set(folio);
		folio_unlock(folio);
		return 0;
	}

	/*
	 * Clear bits this folio occupies in the zeromap to prevent
	 * zero data being read in from any previous zero writes that
	 * occupied the same swap entries.
	 */
	swap_zeromap_folio_clear(folio);

	if (zswap_store(folio)) {
		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);
		folio_unlock(folio);
		return 0;
	}
	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
		folio_mark_dirty(folio);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	__swap_writepage(folio, wbc);
	return 0;
}
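/*
 * Swap-out statistics: PMD-mappable folios bump the legacy THP_SWPOUT
 * counters, while the per-order mTHP stats and PSWPOUT (counted per base
 * page) cover all folio sizes.
 */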
static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
#endif
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
	count_memcg_folio_events(folio, PSWPOUT, folio_nr_pages(folio));
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = folio_memcg(folio);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
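/*
 * A swap_iocb is the plug used for swap-over-filesystem (SWP_FS_OPS):
 * up to SWAP_CLUSTER_MAX folios at contiguous file offsets are batched
 * into one kiocb and submitted with a single ->swap_rw() call when the
 * plug is released.
 */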
struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;
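/*
 * Lazily create the shared sio mempool on first use. Racing callers may
 * both allocate; cmpxchg() installs one pool and the loser frees its
 * copy, avoiding a lock on the swapon path.
 */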
int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the page dirty and avoid
		 * folio_rotate_reclaimable but rate-limit the
		 * messages.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, swap_dev_pos(page_swap_entry(page)));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}
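/*
 * Queue a folio for swap-out via the filesystem. If the folio is
 * contiguous with the currently plugged swap_iocb it is appended to the
 * batch; otherwise the old batch is submitted first. Writeback is started
 * (and the folio unlocked) before queuing, since with no plug the batch
 * is submitted, and may even complete, before this function returns.
 */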
static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct file *swap_file = sis->swap_file;
	loff_t pos = swap_dev_pos(folio->swap);

	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}
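/*
 * Synchronous bdev write: the bio lives on the stack and the task waits
 * for completion inline, which avoids the completion-IRQ round trip on
 * fast (SWP_SYNCHRONOUS_IO) devices such as zram.
 */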
static void swap_writepage_bdev_sync(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1,
		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(&bio, folio);
	count_swpout_vm_event(folio);

	folio_start_writeback(folio);
	folio_unlock(folio);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}
static void swap_writepage_bdev_async(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(bio, folio);
	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	submit_bio(bio);
}
void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(folio, wbc);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
	 * is safe.
	 */
	else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
		swap_writepage_bdev_sync(folio, wbc, sis);
	else
		swap_writepage_bdev_async(folio, wbc, sis);
}
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}
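/*
 * Completion for batched ->swap_rw() reads. Only a full-length read marks
 * the folios uptodate; any short read leaves them !uptodate so the fault
 * path sees an I/O error.
 */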
static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
			count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_unlock(folio);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}
static bool swap_read_folio_zeromap(struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);
	struct obj_cgroup *objcg;
	bool is_zeromap;

	/*
	 * Swapping in a large folio that is partially in the zeromap is not
	 * currently handled. Return true without marking the folio uptodate so
	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
			&is_zeromap) != nr_pages))
		return true;

	if (!is_zeromap)
		return false;

	objcg = get_obj_cgroup_from_folio(folio);
	count_vm_events(SWPIN_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}

	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	return true;
}
static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct swap_iocb *sio = NULL;
	loff_t pos = swap_dev_pos(folio->swap);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}
static void swap_read_folio_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}
static void swap_read_folio_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio(bio);
}
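/*
 * Main swap-in entry point. Fast paths that avoid device I/O entirely
 * (zeromap, then zswap) are tried first; otherwise the read is dispatched
 * to the fs, sync-bdev or async-bdev path. Submission time is charged as
 * a memory stall (PSI/delayacct) when the folio is part of the workingset.
 */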
void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (swap_read_folio_zeromap(folio)) {
		folio_unlock(folio);
		goto finish;
	} else if (zswap_load(folio)) {
		folio_unlock(folio);
		goto finish;
	}

	/* We have to read from slower devices. Increase zswap protection. */
	zswap_folio_swapin(folio);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_read_folio_fs(folio, plug);
	} else if (synchronous) {
		swap_read_folio_bdev_sync(folio, sis);
	} else {
		swap_read_folio_bdev_async(folio, sis);
	}

finish:
	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}
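/*
 * Submit a plugged read batch. Callers normally go through the
 * swap_read_unplug() wrapper (presumably a static inline in mm/swap.h
 * that skips NULL sios); __swap_read_unplug() requires a valid sio.
 */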
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}