// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};
struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;
#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
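/*
 * Illustration (editor's note, assuming 4K pages, i.e. PAGE_SHIFT == 12):
 * SWAP_RA_WIN_SHIFT is then 6, so the atomic long in
 * vma->swap_readahead_info packs the readahead hit count in bits 0-5,
 * the window size in bits 6-11 and the page-aligned fault address in the
 * remaining high bits.  For example, SWAP_RA_VAL(0x7f0000153000, 4, 2)
 * yields 0x7f0000153102, from which SWAP_RA_ADDR(), SWAP_RA_WIN() and
 * SWAP_RA_HITS() recover the address, a window of 4 and 2 hits.
 */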
#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;
unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;
	struct swap_info_struct *si;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		swp_entry_t entry = swp_entry(i, 1);

		/* Avoid get_swap_device() to warn for bad swap entry */
		if (!swp_swap_info(entry))
			continue;
		/* Prevent swapoff to free swapper_spaces */
		si = get_swap_device(entry);
		if (!si)
			continue;
		nr = nr_swapper_spaces[i];
		spaces = swapper_spaces[i];
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
		put_swap_device(si);
	}
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = find_get_entry(address_space, idx);
	if (xa_is_value(page))
		return page;
	if (page)
		put_page(page);
	return NULL;
}
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		unsigned long nr_shadows = 0;

		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				nr_shadows++;
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrexceptional -= nr_shadows;
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = thp_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	if (shadow)
		address_space->nrexceptional += nr;
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty. A special case is a MADV_FREE page: its pte could have the
	 * dirty bit cleared while its SwapBacked bit is still set, because
	 * clearing the dirty bit and the SwapBacked bit is not protected by a
	 * lock. For such a page, unmap will not set the dirty bit, so page
	 * reclaim will not write the page out. This can cause data corruption
	 * when the page is swapped in later. Always setting the dirty bit for
	 * the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, thp_nr_pages(page));
}
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		unsigned long nr_shadows = 0;
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
			nr_shadows++;
		}
		address_space->nrexceptional -= nr_shadows;
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}
/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = find_get_entry(mapping, index);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		cond_resched();
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
		put_swap_page(page, entry);
		goto fail_unlock;
	}

	if (mem_cgroup_charge(page, NULL, gfp_mask)) {
		delete_from_swap_cache(page);
		goto fail_unlock;
	}

	if (shadow)
		workingset_refault(page, shadow);

	/* Caller will initiate read into locked page */
	SetPageWorkingset(page);
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	unlock_page(page);
	put_page(page);
	return NULL;
}
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
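/*
 * Worked example (editor's note, not from the original source): with the
 * default page_cluster of 3, max_pages is 8.  A fault with no recorded
 * hits gets pages = 0 + 2 = 2, reduced to 1 unless the fault is adjacent
 * to the previous offset; a fault with 3 recorded hits gets 3 + 2 = 5,
 * rounded up to the next power of two (8) and capped at max_pages.  The
 * "don't shrink readahead too fast" rule keeps the new window at no less
 * than half of the previous one.
 */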
/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Test swap type to make sure the dereference is safe */
	if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
		struct inode *inode = si->swap_file->f_mapping->host;
		if (inode_read_congested(inode))
			goto skip;
	}

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
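/*
 * Illustration (editor's note): the cluster is aligned with simple bit
 * masks.  With a readahead window of 8 pages, mask is 7; a fault at swap
 * offset 0x123 therefore queues reads for offsets 0x120 (0x123 & ~7)
 * through 0x127 (0x123 | 7), and the faulting offset's page is returned
 * by the trailing read_swap_cache_async() call.
 */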
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}
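/*
 * Editor's note: SWAP_ADDRESS_SPACE_PAGES (defined in <linux/swap.h>) is
 * 1 << SWAP_ADDRESS_SPACE_SHIFT, i.e. 16384 slots per address_space, so a
 * swap device covering 64K pages (256MiB with 4KiB pages) would get four
 * swapper address spaces from the kvcalloc() above.
 */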
void exit_swap_address_space(unsigned int type)
{
	kvfree(swapper_spaces[type]);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
	entry = pte_to_swp_entry(*pte);
	if ((unlikely(non_swap_entry(entry)))) {
		pte_unmap(orig_pte);
		return;
	}

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}
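/*
 * Editor's note on window placement: if the current fault follows the
 * previously recorded address (fpfn == pfn + 1) the window of 'win' pages
 * extends forward from the fault; if it precedes it (pfn == fpfn + 1) the
 * window extends backward; otherwise the window is centred on the fault,
 * with (win - 1) / 2 pages to its left.  swap_ra_clamp_pfn() then clips
 * the range to the VMA and to the PMD holding the faulting address.
 */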
/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. physical
 * disk based) or VMA-based (i.e. based on virtual addresses around the
 * faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		enable_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		enable_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
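/*
 * Editor's note: with the kobject registered under mm_kobj by
 * swap_init_sysfs() below, this knob is expected to appear as
 * /sys/kernel/mm/swap/vma_ra_enabled.  Writing "false" (or "0") falls
 * back to cluster-based readahead; writing "true" (or "1") re-enables
 * the VMA-based policy.
 */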
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);
static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};
static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif