/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
		/* swap cache doesn't use writeback related tags */
		.flags		= 1 << AS_NO_WRITEBACK_TAGS,
	}
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	get_page(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
				  swp_offset(entry), page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_node_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(), so add_to_swap_cache()
		 * doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		put_page(page);
	}

	return error;
}
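
/*
 * Editorial note (not in the upstream file): a swap-cache page is keyed by
 * swp_offset(entry) in the radix tree of its swap type's address_space,
 * while page_private() holds the full swp_entry_t value, so the entry can
 * always be reconstructed from the page itself, e.g.:
 *
 *	swp_entry_t entry = { .val = page_private(page) };
 *	struct address_space *mapping = swap_address_space(entry);
 *	unsigned long offset = swp_offset(entry);
 */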

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, swp_offset(entry));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry)) {
		swapcache_free(entry);
		return 0;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}
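
/*
 * Illustrative sketch (not part of upstream swap_state.c): the typical
 * caller is vmscan's shrink_page_list(), which, roughly, does the
 * following for an anonymous page it wants to reclaim.  The label name
 * below is simplified.
 */
#if 0
	if (PageAnon(page) && !PageSwapCache(page)) {
		if (!add_to_swap(page, page_list))
			goto activate_locked;	/* no swap slot: keep the page */
		/* page is now in the swap cache and can go out via swap_writepage() */
	}
#endif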

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}
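
/*
 * Editorial note (not in the upstream file): the PageReadahead test above
 * is the feedback half of the readahead heuristic.  Each hit on a page
 * that was brought in speculatively bumps swapin_readahead_hits, which
 * swapin_nr_pages() later in this file consumes to grow or shrink the
 * next readahead window.
 */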

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}
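
/*
 * Illustrative sketch (not part of upstream swap_state.c): a minimal
 * synchronous swapin, as a caller such as swapoff's try_to_unuse() might
 * perform it.  GFP_HIGHUSER_MOVABLE is the usual allocation mask for
 * user pages; vma and addr may be NULL/0 when no mempolicy applies.
 */
#if 0
	struct page *page;

	page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, NULL, 0);
	if (page) {
		wait_on_page_locked(page);	/* I/O completion unlocks the page */
		put_page(page);			/* drop the swap-cache lookup reference */
	}
#endif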

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
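
/*
 * Worked example (editorial, not in the upstream file): with page_cluster
 * == 3, max_pages is 8.  If 5 readahead hits were recorded since the last
 * fault, pages starts at 5 + 2 = 7, is rounded up to the next power of two
 * (8) and clamped to max_pages, so the next swapin_readahead() reads an
 * 8-page cluster.  With no hits and a non-adjacent offset, pages drops
 * toward 1, limited by the "don't shrink too fast" clamp to at least half
 * of the previous window.
 */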

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
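
/*
 * Worked example (editorial, not in the upstream file): if swapin_nr_pages()
 * returns 8, mask is 7.  A fault at swap offset 1003 then reads the aligned
 * cluster of offsets 1000..1007 (1003 & ~7 = 1000, 1003 | 7 = 1007); every
 * page except the faulting one is marked PageReadahead, so later hits in
 * lookup_swap_cache() feed back into swapin_nr_pages().
 */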