// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_folio_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};
struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;
#define SWAP_RA_ORDER_CEILING	5

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))
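/*
 * Layout of the packed value on a 4K-page build (PAGE_SHIFT == 12, so
 * SWAP_RA_WIN_SHIFT == 6): the page-aligned fault address lives in the
 * high bits, the readahead window in bits [11:6] and the hit count in
 * bits [5:0], so all three hints share the vma's single
 * swap_readahead_info word.
 */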
/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}
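/*
 * Shadow entries are xarray value entries left behind by reclaim; they
 * encode workingset (refault distance) information rather than a folio
 * pointer, which is why only xa_is_value() slots are reported here.
 */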
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	void *shadow;

	shadow = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(shadow))
		return shadow;
	return NULL;
}
/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio->swap = entry;

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			if (shadowp) {
				old = xas_load(&xas);
				if (xa_is_value(old))
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}
/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	folio->swap.val = 0;
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}
/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is MADV_FREE page. The
	 * page's pte could have dirty bit cleared but the folio's
	 * SwapBacked flag is still set because clearing the dirty bit
	 * and SwapBacked flag has no lock protected. For such folio,
	 * unmap will not set dirty bit for it, so folio reclaim will
	 * not write the folio out. This can cause data corruption when
	 * the folio is swapped in later. Always setting the dirty flag
	 * for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}
/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio->swap;
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		unsigned long index = curr & SWAP_ADDRESS_SPACE_MASK;
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, index);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, min(index + (end - curr), SWAP_ADDRESS_SPACE_PAGES)) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * Its ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 */
void free_swap_cache(struct folio *folio)
{
	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	free_swap_cache(folio);
	if (!is_huge_zero_folio(folio))
		folio_put(folio);
}
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	struct folio_batch folios;
	unsigned int refs[PAGEVEC_SIZE];

	lru_add_drain();
	folio_batch_init(&folios);
	for (int i = 0; i < nr; i++) {
		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));

		free_swap_cache(folio);
		refs[folios.nr] = 1;
		if (unlikely(encoded_page_flags(pages[i]) &
			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
			refs[folios.nr] = encoded_nr_pages(pages[++i]);

		if (folio_batch_add(&folios, folio) == 0)
			folios_put_refs(&folios, refs);
	}
	if (folios.nr)
		folios_put_refs(&folios, refs);
}
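/*
 * nr_rotate_swap counts active swap devices backed by rotational media;
 * VMA-based readahead is only used when every swap device is
 * non-rotational, otherwise the cluster (offset-based) path is taken.
 */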
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}
/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or %NULL.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swap_cache_index(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}
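/*
 * __read_swap_cache_async() claims the swap slot for the swap cache with
 * swapcache_prepare() (setting SWAP_HAS_CACHE), allocates and charges a
 * folio, inserts it into the swap cache and the LRU, and reports through
 * *new_page_allocated whether the caller must start the actual read.
 * If another task already owns the slot, it waits for that task's folio
 * to appear in the swap cache and returns it, or returns NULL if the
 * entry goes away in the meantime.
 */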
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
		bool skip_if_exists)
{
	struct swap_info_struct *si;
	struct folio *folio;
	struct folio *new_folio = NULL;
	struct folio *result = NULL;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
					  swap_cache_index(entry));
		if (!IS_ERR(folio))
			goto got_folio;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto put_and_return;

		/*
		 * Get a new folio to read into from swap.  Allocate it now if
		 * new_folio not exist, before marking swap_map SWAP_HAS_CACHE,
		 * when -EEXIST will cause any racers to loop around until we
		 * add it to cache.
		 */
		if (!new_folio) {
			new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
			if (!new_folio)
				goto put_and_return;
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry, 1);
		if (!err)
			break;
		else if (err != -EEXIST)
			goto put_and_return;

		/*
		 * Protect against a recursive call to __read_swap_cache_async()
		 * on the same entry waiting forever here because SWAP_HAS_CACHE
		 * is set but the folio is not the swap cache yet. This can
		 * happen today if mem_cgroup_swapin_charge_folio() below
		 * triggers reclaim through zswap, which may call
		 * __read_swap_cache_async() in the writeback path.
		 */
		if (skip_if_exists)
			goto put_and_return;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its folio to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new folio.
	 */
	__folio_set_locked(new_folio);
	__folio_set_swapbacked(new_folio);

	if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry, 1);

	if (shadow)
		workingset_refault(new_folio, shadow);

	/* Caller will initiate read into locked new_folio */
	folio_add_lru(new_folio);
	*new_page_allocated = true;
	folio = new_folio;
got_folio:
	result = folio;
	goto put_and_return;

fail_unlock:
	put_swap_folio(new_folio, entry);
	folio_unlock(new_folio);

put_and_return:
	put_swap_device(si);
	if (!(*new_page_allocated) && new_folio)
		folio_put(new_folio);
	return result;
}
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() call them and swap_read_folio() holds the
 * swap cache folio lock.
 */
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		struct swap_iocb **plug)
{
	bool page_allocated;
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vma, addr, 0, &ilx);
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	mpol_cond_put(mpol);

	if (page_allocated)
		swap_read_folio(folio, plug);
	return folio;
}
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
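/*
 * Worked example for the heuristic above: 5 recent hits give pages = 7,
 * rounded up to the next power of two (8), then clamped to max_pages and
 * kept at no less than half of the previous window. With no hits at all
 * the window collapses to a single page unless the fault is adjacent to
 * the previous offset.
 */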
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
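/*
 * Example: with page_cluster == 3, swapin_nr_pages() can return up to 8,
 * so swap_cluster_readahead() below reads the aligned block of 8 slots
 * around the faulting offset (a fault on swap offset 100 covers 96..103).
 */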
/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				    struct mempolicy *mpol, pgoff_t ilx)
{
	struct folio *folio;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		folio = __read_swap_cache_async(
				swp_entry(swp_type(entry), offset),
				gfp_mask, mpol, ilx, &page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (offset != entry_offset) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	if (unlikely(page_allocated))
		swap_read_folio(folio, NULL);
	return folio;
}
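/*
 * Each swap device gets one struct address_space per
 * SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT) swap slots,
 * so the xarray and its lock are sharded across the device instead of
 * forming a single contention point.
 */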
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}
static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
			   unsigned long *end)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, prev_faddr, left, right;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
	if (max_win == 1)
		return 1;

	faddr = vmf->address;
	ra_val = GET_SWAP_RA_VAL(vma);
	prev_faddr = SWAP_RA_ADDR(ra_val);
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
				max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return 1;

	if (faddr == prev_faddr + PAGE_SIZE)
		left = faddr;
	else if (prev_faddr == faddr + PAGE_SIZE)
		left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
	else
		left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
	right = left + (win << PAGE_SHIFT);
	if ((long)left < 0)
		left = 0;
	*start = max3(left, vma->vm_start, faddr & PMD_MASK);
	*end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);

	return win;
}
/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct folio *folio;
	pte_t *pte = NULL, pentry;
	int win;
	unsigned long start, end, addr;
	swp_entry_t entry;
	pgoff_t ilx;
	bool page_allocated;

	win = swap_vma_ra_win(vmf, &start, &end);
	if (win == 1)
		goto skip;

	ilx = targ_ilx - PFN_DOWN(vmf->address - start);

	blk_start_plug(&plug);
	for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
						&page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (addr != vmf->address) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The folio was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
					&page_allocated, false);
	if (unlikely(page_allocated))
		swap_read_folio(folio, NULL);
	return folio;
}
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * It's a main entry function for swap readahead. By the configuration,
 * it will read ahead blocks by cluster-based(ie, physical disk based)
 * or vma-based(ie, virtual address based on faulty address) readahead.
 */
struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
	folio = swap_use_vma_readahead() ?
		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
	mpol_cond_put(mpol);

	return folio;
}
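/*
 * The vma_ra_enabled knob below is exposed as
 * /sys/kernel/mm/swap/vma_ra_enabled (the "swap" kobject is created
 * under mm_kobj) and toggles enable_vma_readahead at runtime.
 */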
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", str_true_false(enable_vma_readahead));
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif