// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"
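
/*
 * Common completion work for swap-out bios, shared by the sync and
 * async write paths: on error the folio is redirtied so its data is
 * not lost to reclaim, and writeback is ended either way.
 */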
static void __end_swap_bio_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		folio_mark_dirty(folio);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		folio_clear_reclaim(folio);
	}
	folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}
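
/*
 * Completion work for swap-in bios: the folio is only marked uptodate
 * when the bio succeeded, so a failed read surfaces as an error on the
 * next access rather than exposing stale data.
 */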
static void __end_swap_bio_read(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		folio_mark_uptodate(folio);
	}
	folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}
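
/*
 * Walk a swapfile with bmap() and record each PAGE_SIZE-aligned,
 * physically contiguous run of blocks as a swap extent, so later swap
 * I/O can go straight to the block device without consulting the
 * filesystem. Returns the number of extents added, or -EINVAL if the
 * file has holes.
 */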
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}
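
/*
 * Returns true only if every word of every page in the folio is zero.
 * Each page is mapped in turn with kmap_local_folio() for the scan.
 */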
static bool is_folio_zero_filled(struct folio *folio)
{
	unsigned int pos, last_pos;
	unsigned long *data;
	unsigned int i;

	last_pos = PAGE_SIZE / sizeof(*data) - 1;
	for (i = 0; i < folio_nr_pages(folio); i++) {
		data = kmap_local_folio(folio, i * PAGE_SIZE);
		/*
		 * Check last word first, in case the page is zero-filled at
		 * the start and has non-zero data at the end, which is common
		 * in real-world workloads.
		 */
		if (data[last_pos]) {
			kunmap_local(data);
			return false;
		}
		for (pos = 0; pos < last_pos; pos++) {
			if (data[pos]) {
				kunmap_local(data);
				return false;
			}
		}
		kunmap_local(data);
	}

	return true;
}
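
/*
 * Mark the folio's swap entries in the device's zeromap so swap-in can
 * synthesize zero pages instead of doing I/O, and account the avoided
 * writes to vmstat and the owning cgroup.
 */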
static void swap_zeromap_folio_set(struct folio *folio)
{
	struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	int nr_pages = folio_nr_pages(folio);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		set_bit(swp_offset(entry), sis->zeromap);
	}

	count_vm_events(SWPOUT_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}
}
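
/*
 * Clear the folio's swap entries from the zeromap; used when the folio
 * is written out with real (non-zero) data.
 */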
static void swap_zeromap_folio_clear(struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		clear_bit(swp_offset(entry), sis->zeromap);
	}
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}

	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(folio);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}

	/*
	 * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
	 * The bits in zeromap are protected by the locked swapcache folio
	 * and atomic updates are used to protect against read-modify-write
	 * corruption due to other zero swap entries seeing concurrent updates.
	 */
	if (is_folio_zero_filled(folio)) {
		swap_zeromap_folio_set(folio);
		folio_unlock(folio);
		return 0;
	} else {
		/*
		 * Clear bits this folio occupies in the zeromap to prevent
		 * zero data being read in from any previous zero writes that
		 * occupied the same swap entries.
		 */
		swap_zeromap_folio_clear(folio);
	}

	if (zswap_store(folio)) {
		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);
		folio_unlock(folio);
		return 0;
	}
	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
		folio_mark_dirty(folio);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	__swap_writepage(folio, wbc);
	return 0;
}

static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
#endif
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
	count_memcg_folio_events(folio, PSWPOUT, folio_nr_pages(folio));
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = folio_memcg(folio);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
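
/*
 * One in-flight batch of swap I/O to a filesystem-backed swapfile
 * (SWP_FS_OPS): up to SWAP_CLUSTER_MAX folio segments submitted
 * through the filesystem's ->swap_rw() as a single kiocb.
 */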
struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;
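
/*
 * Lazily create the shared swap_iocb mempool. Racing callers may both
 * allocate a pool; cmpxchg() installs the first and the loser frees
 * its copy, so concurrent calls are safe.
 */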
int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the page dirty and avoid
		 * folio_rotate_reclaimable but rate-limit the
		 * messages.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, swap_dev_pos(page_swap_entry(page)));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}
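
/*
 * Queue a folio for write-out through the swapfile's filesystem. With
 * a plug from the caller, folios at consecutive file offsets are
 * batched into one swap_iocb; a discontiguity or a full bvec array
 * flushes the pending batch via swap_write_unplug().
 */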
static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct file *swap_file = sis->swap_file;
	loff_t pos = swap_dev_pos(folio->swap);

	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}
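
/*
 * Synchronous write-out for devices flagged SWP_SYNCHRONOUS_IO: the
 * bio lives on the stack and the caller waits for completion, so no
 * end_io callback or bio_put() is needed.
 */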
static void swap_writepage_bdev_sync(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1,
		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(&bio, folio);
	count_swpout_vm_event(folio);

	folio_start_writeback(folio);
	folio_unlock(folio);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}
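
/*
 * Asynchronous write-out for ordinary block devices: the bio is heap
 * allocated and end_swap_bio_write() later finishes writeback and
 * drops the bio reference.
 */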
static void swap_writepage_bdev_async(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(bio, folio);
	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	submit_bio(bio);
}
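
/*
 * Dispatch a locked swapcache folio to the backend matching its swap
 * device: filesystem (SWP_FS_OPS), synchronous block device
 * (SWP_SYNCHRONOUS_IO), or asynchronous block device.
 */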
void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(folio, wbc);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
	 * is safe.
	 */
	else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
		swap_writepage_bdev_sync(folio, wbc, sis);
	else
		swap_writepage_bdev_async(folio, wbc, sis);
}
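
/*
 * Flush a batched swap_iocb: hand the accumulated bio_vecs to the
 * filesystem's ->swap_rw(). If the filesystem completed synchronously
 * (anything but -EIOCBQUEUED), run the completion handler here.
 */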
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
			count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_unlock(folio);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}
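
/*
 * If every entry backing this folio is marked in the zeromap, fill it
 * with zeroes instead of reading from the device. Returns true when
 * the swap-in was handled here, either successfully or by forcing an
 * I/O error for the unhandled partially-zeromapped case.
 */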
static bool swap_read_folio_zeromap(struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);
	struct obj_cgroup *objcg;
	bool is_zeromap;

	/*
	 * Swapping in a large folio that is partially in the zeromap is not
	 * currently handled. Return true without marking the folio uptodate so
	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
			&is_zeromap) != nr_pages))
		return true;

	if (!is_zeromap)
		return false;

	objcg = get_obj_cgroup_from_folio(folio);
	count_vm_events(SWPIN_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}

	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	return true;
}
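
/*
 * Read a folio in through the swapfile's filesystem, mirroring
 * swap_writepage_fs(): contiguous requests are batched in a plugged
 * swap_iocb and submitted together through ->swap_rw().
 */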
static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct swap_iocb *sio = NULL;
	loff_t pos = swap_dev_pos(folio->swap);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}
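
/*
 * Synchronous swap-in from a fast block device: a stack bio is
 * submitted and waited on inline, avoiding the completion-path
 * overhead of an asynchronous read.
 */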
static void swap_read_folio_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}
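
/*
 * Asynchronous swap-in from an ordinary block device; the folio is
 * unlocked by end_swap_bio_read() when the bio completes.
 */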
static void swap_read_folio_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio(bio);
}
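
/*
 * Main swap-in entry point for a locked swapcache folio. Zeromap and
 * zswap hits are served from memory; everything else is dispatched to
 * the filesystem or block-device paths. Submission time is charged to
 * PSI memory stall and delay accounting.
 */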
void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (swap_read_folio_zeromap(folio)) {
		folio_unlock(folio);
		goto finish;
	} else if (zswap_load(folio)) {
		folio_unlock(folio);
		goto finish;
	}

	/* We have to read from slower devices. Increase zswap protection. */
	zswap_folio_swapin(folio);
	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_read_folio_fs(folio, plug);
	} else if (synchronous) {
		swap_read_folio_bdev_sync(folio, sis);
	} else {
		swap_read_folio_bdev_async(folio, sis);
	}

finish:
	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}
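
/*
 * Read-side counterpart of swap_write_unplug(): submit a batched
 * swap_iocb through ->swap_rw() and complete it inline unless the
 * filesystem queued it asynchronously.
 */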
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}