1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/mm/swapfile.c
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 * Swap reorganised 29.12.95, Stephen Tweedie
7 */
9 #include <linux/blkdev.h>
10 #include <linux/mm.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/task.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mman.h>
15 #include <linux/slab.h>
16 #include <linux/kernel_stat.h>
17 #include <linux/swap.h>
18 #include <linux/vmalloc.h>
19 #include <linux/pagemap.h>
20 #include <linux/namei.h>
21 #include <linux/shmem_fs.h>
22 #include <linux/blk-cgroup.h>
23 #include <linux/random.h>
24 #include <linux/writeback.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/init.h>
28 #include <linux/ksm.h>
29 #include <linux/rmap.h>
30 #include <linux/security.h>
31 #include <linux/backing-dev.h>
32 #include <linux/mutex.h>
33 #include <linux/capability.h>
34 #include <linux/syscalls.h>
35 #include <linux/memcontrol.h>
36 #include <linux/poll.h>
37 #include <linux/oom.h>
38 #include <linux/swapfile.h>
39 #include <linux/export.h>
40 #include <linux/swap_slots.h>
41 #include <linux/sort.h>
42 #include <linux/completion.h>
43 #include <linux/suspend.h>
44 #include <linux/zswap.h>
45 #include <linux/plist.h>
47 #include <asm/tlbflush.h>
48 #include <linux/swapops.h>
49 #include <linux/swap_cgroup.h>
50 #include "internal.h"
51 #include "swap.h"
53 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
54 unsigned char);
55 static void free_swap_count_continuations(struct swap_info_struct *);
56 static void swap_entry_range_free(struct swap_info_struct *si,
57 struct swap_cluster_info *ci,
58 swp_entry_t entry, unsigned int nr_pages);
59 static void swap_range_alloc(struct swap_info_struct *si,
60 unsigned int nr_entries);
61 static bool folio_swapcache_freeable(struct folio *folio);
62 static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
63 unsigned long offset);
64 static inline void unlock_cluster(struct swap_cluster_info *ci);
66 static DEFINE_SPINLOCK(swap_lock);
67 static unsigned int nr_swapfiles;
68 atomic_long_t nr_swap_pages;
70 * Some modules use swappable objects and may try to swap them out under
71 * memory pressure (via the shrinker). Before doing so, they may wish to
72 * check to see if any swap space is available.
74 EXPORT_SYMBOL_GPL(nr_swap_pages);
75 /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
76 long total_swap_pages;
77 static int least_priority = -1;
78 unsigned long swapfile_maximum_size;
79 #ifdef CONFIG_MIGRATION
80 bool swap_migration_ad_supported;
81 #endif /* CONFIG_MIGRATION */
83 static const char Bad_file[] = "Bad swap file entry ";
84 static const char Unused_file[] = "Unused swap file entry ";
85 static const char Bad_offset[] = "Bad swap offset entry ";
86 static const char Unused_offset[] = "Unused swap offset entry ";
89 * all active swap_info_structs
90 * protected with swap_lock, and ordered by priority.
92 static PLIST_HEAD(swap_active_head);
95 * all available (active, not full) swap_info_structs
96 * protected with swap_avail_lock, ordered by priority.
97 * This is used by folio_alloc_swap() instead of swap_active_head
98 * because swap_active_head includes all swap_info_structs,
99 * but folio_alloc_swap() doesn't need to look at full ones.
100 * This uses its own lock instead of swap_lock because when a
101 * swap_info_struct changes between not-full/full, it needs to
102 * add/remove itself to/from this list, but the swap_info_struct->lock
103 * is held and the locking order requires swap_lock to be taken
104 * before any swap_info_struct->lock.
106 static struct plist_head *swap_avail_heads;
107 static DEFINE_SPINLOCK(swap_avail_lock);
109 static struct swap_info_struct *swap_info[MAX_SWAPFILES];
111 static DEFINE_MUTEX(swapon_mutex);
113 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
114 /* Activity counter to indicate that a swapon or swapoff has occurred */
115 static atomic_t proc_poll_event = ATOMIC_INIT(0);
117 atomic_t nr_rotate_swap = ATOMIC_INIT(0);
119 static struct swap_info_struct *swap_type_to_swap_info(int type)
121 if (type >= MAX_SWAPFILES)
122 return NULL;
124 return READ_ONCE(swap_info[type]); /* rcu_dereference() */
127 static inline unsigned char swap_count(unsigned char ent)
129 return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
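/*
 * Illustrative sketch (not part of the original file): how a swap_map
 * byte decomposes. The values come from <linux/swap.h>, where
 * SWAP_HAS_CACHE is 0x40 and COUNT_CONTINUED is 0x80.
 *
 *	unsigned char ent = SWAP_HAS_CACHE | 3;
 *	swap_count(ent);		// -> 3 page table references
 *	ent & SWAP_HAS_CACHE;		// -> non-zero, slot is in swap cache
 *
 * A count with COUNT_CONTINUED set means the real count overflowed into
 * a continuation page (see swap_count_continued() below).
 */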
133  * Use the second highest bit of the inuse_pages counter as an indicator
134  * of whether a swap device is on the available plist, so the atomic can
135  * still be updated arithmetically while having special data embedded.
137  * The inuse_pages counter is the only thing indicating whether a device
138  * should be on avail_lists or not (except swapon / swapoff). By embedding
139  * the off-list bit in the atomic counter, updates no longer need any lock
140  * to check the list status.
142  * This bit will be set if the device is not on the plist and not
143  * usable, and will be cleared if the device is on the plist.
145 #define SWAP_USAGE_OFFLIST_BIT (1UL << (BITS_PER_TYPE(atomic_t) - 2))
146 #define SWAP_USAGE_COUNTER_MASK (~SWAP_USAGE_OFFLIST_BIT)
147 static long swap_usage_in_pages(struct swap_info_struct *si)
149 return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK;
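/*
 * Worked example (illustrative, not from the original file): with a
 * 32-bit atomic_t, SWAP_USAGE_OFFLIST_BIT is 1UL << 30. If
 * si->inuse_pages reads (SWAP_USAGE_OFFLIST_BIT | 1024), the device is
 * currently off the avail plist while swap_usage_in_pages() still
 * reports 1024 slots in use, since the mask strips the flag bit.
 */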
152 /* Reclaim the swap entry anyway if possible */
153 #define TTRS_ANYWAY 0x1
155 * Reclaim the swap entry if there are no more mappings of the
156 * corresponding page
158 #define TTRS_UNMAPPED 0x2
159 /* Reclaim the swap entry if swap is getting full */
160 #define TTRS_FULL 0x4
161 /* Reclaim directly, bypass the slot cache and don't touch device lock */
162 #define TTRS_DIRECT 0x8
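/*
 * Typical flag combinations used later in this file (for reference):
 *
 *	__try_to_reclaim_swap(si, offset, TTRS_UNMAPPED | TTRS_FULL);
 *		- from free_swap_and_cache_nr(): reclaim only if the page is
 *		  unmapped or swap is getting full, going through the slot cache.
 *
 *	__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
 *		- from cluster reclaim: reclaim unconditionally and free the
 *		  entries directly, bypassing the slot cache.
 */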
164 static bool swap_is_has_cache(struct swap_info_struct *si,
165 unsigned long offset, int nr_pages)
167 unsigned char *map = si->swap_map + offset;
168 unsigned char *map_end = map + nr_pages;
170 do {
171 VM_BUG_ON(!(*map & SWAP_HAS_CACHE));
172 if (*map != SWAP_HAS_CACHE)
173 return false;
174 } while (++map < map_end);
176 return true;
179 static bool swap_is_last_map(struct swap_info_struct *si,
180 unsigned long offset, int nr_pages, bool *has_cache)
182 unsigned char *map = si->swap_map + offset;
183 unsigned char *map_end = map + nr_pages;
184 unsigned char count = *map;
186 if (swap_count(count) != 1)
187 return false;
189 while (++map < map_end) {
190 if (*map != count)
191 return false;
194 *has_cache = !!(count & SWAP_HAS_CACHE);
195 return true;
199 * returns number of pages in the folio that backs the swap entry. If positive,
200 * the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no
201 * folio was associated with the swap entry.
203 static int __try_to_reclaim_swap(struct swap_info_struct *si,
204 unsigned long offset, unsigned long flags)
206 swp_entry_t entry = swp_entry(si->type, offset);
207 struct address_space *address_space = swap_address_space(entry);
208 struct swap_cluster_info *ci;
209 struct folio *folio;
210 int ret, nr_pages;
211 bool need_reclaim;
213 folio = filemap_get_folio(address_space, swap_cache_index(entry));
214 if (IS_ERR(folio))
215 return 0;
217 nr_pages = folio_nr_pages(folio);
218 ret = -nr_pages;
221  * This function can be called from scan_swap_map_slots(), which in turn
222  * is called by vmscan.c while reclaiming folios, so a folio lock may
223  * already be held here. We have to use trylock to avoid deadlock. This
224  * is a special case; in usual operation you should use folio_free_swap()
225  * with an explicit folio_lock().
227 if (!folio_trylock(folio))
228 goto out;
230 /* offset could point to the middle of a large folio */
231 entry = folio->swap;
232 offset = swp_offset(entry);
234 need_reclaim = ((flags & TTRS_ANYWAY) ||
235 ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
236 ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)));
237 if (!need_reclaim || !folio_swapcache_freeable(folio))
238 goto out_unlock;
241 * It's safe to delete the folio from swap cache only if the folio's
242 * swap_map is HAS_CACHE only, which means the slots have no page table
243 * reference or pending writeback, and can't be allocated to others.
245 ci = lock_cluster(si, offset);
246 need_reclaim = swap_is_has_cache(si, offset, nr_pages);
247 unlock_cluster(ci);
248 if (!need_reclaim)
249 goto out_unlock;
251 if (!(flags & TTRS_DIRECT)) {
252 /* Free through slot cache */
253 delete_from_swap_cache(folio);
254 folio_set_dirty(folio);
255 ret = nr_pages;
256 goto out_unlock;
259 xa_lock_irq(&address_space->i_pages);
260 __delete_from_swap_cache(folio, entry, NULL);
261 xa_unlock_irq(&address_space->i_pages);
262 folio_ref_sub(folio, nr_pages);
263 folio_set_dirty(folio);
265 ci = lock_cluster(si, offset);
266 swap_entry_range_free(si, ci, entry, nr_pages);
267 unlock_cluster(ci);
268 ret = nr_pages;
269 out_unlock:
270 folio_unlock(folio);
271 out:
272 folio_put(folio);
273 return ret;
276 static inline struct swap_extent *first_se(struct swap_info_struct *sis)
278 struct rb_node *rb = rb_first(&sis->swap_extent_root);
279 return rb_entry(rb, struct swap_extent, rb_node);
282 static inline struct swap_extent *next_se(struct swap_extent *se)
284 struct rb_node *rb = rb_next(&se->rb_node);
285 return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
289  * swapon tells the device that all the old swap contents can be discarded,
290  * to allow the swap device to optimize its wear-levelling.
292 static int discard_swap(struct swap_info_struct *si)
294 struct swap_extent *se;
295 sector_t start_block;
296 sector_t nr_blocks;
297 int err = 0;
299 /* Do not discard the swap header page! */
300 se = first_se(si);
301 start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
302 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
303 if (nr_blocks) {
304 err = blkdev_issue_discard(si->bdev, start_block,
305 nr_blocks, GFP_KERNEL);
306 if (err)
307 return err;
308 cond_resched();
311 for (se = next_se(se); se; se = next_se(se)) {
312 start_block = se->start_block << (PAGE_SHIFT - 9);
313 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
315 err = blkdev_issue_discard(si->bdev, start_block,
316 nr_blocks, GFP_KERNEL);
317 if (err)
318 break;
320 cond_resched();
322 return err; /* That will often be -EOPNOTSUPP */
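/*
 * Note on the "<< (PAGE_SHIFT - 9)" conversions above (illustrative):
 * block device sectors are 512 bytes, so with 4 KiB pages each page
 * covers 8 sectors. E.g. a swap extent starting at page block 100
 * starts at sector 100 << 3 == 800, and 16 pages span 128 sectors.
 */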
325 static struct swap_extent *
326 offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
328 struct swap_extent *se;
329 struct rb_node *rb;
331 rb = sis->swap_extent_root.rb_node;
332 while (rb) {
333 se = rb_entry(rb, struct swap_extent, rb_node);
334 if (offset < se->start_page)
335 rb = rb->rb_left;
336 else if (offset >= se->start_page + se->nr_pages)
337 rb = rb->rb_right;
338 else
339 return se;
341 /* It *must* be present */
342 BUG();
345 sector_t swap_folio_sector(struct folio *folio)
347 struct swap_info_struct *sis = swp_swap_info(folio->swap);
348 struct swap_extent *se;
349 sector_t sector;
350 pgoff_t offset;
352 offset = swp_offset(folio->swap);
353 se = offset_to_swap_extent(sis, offset);
354 sector = se->start_block + (offset - se->start_page);
355 return sector << (PAGE_SHIFT - 9);
359  * swap allocation tells the device that a cluster of swap can now be discarded,
360  * to allow the swap device to optimize its wear-levelling.
362 static void discard_swap_cluster(struct swap_info_struct *si,
363 pgoff_t start_page, pgoff_t nr_pages)
365 struct swap_extent *se = offset_to_swap_extent(si, start_page);
367 while (nr_pages) {
368 pgoff_t offset = start_page - se->start_page;
369 sector_t start_block = se->start_block + offset;
370 sector_t nr_blocks = se->nr_pages - offset;
372 if (nr_blocks > nr_pages)
373 nr_blocks = nr_pages;
374 start_page += nr_blocks;
375 nr_pages -= nr_blocks;
377 start_block <<= PAGE_SHIFT - 9;
378 nr_blocks <<= PAGE_SHIFT - 9;
379 if (blkdev_issue_discard(si->bdev, start_block,
380 nr_blocks, GFP_NOIO))
381 break;
383 se = next_se(se);
387 #ifdef CONFIG_THP_SWAP
388 #define SWAPFILE_CLUSTER HPAGE_PMD_NR
390 #define swap_entry_order(order) (order)
391 #else
392 #define SWAPFILE_CLUSTER 256
395  * Define swap_entry_order() as a constant to let the compiler optimize
396  * out some code if !CONFIG_THP_SWAP
398 #define swap_entry_order(order) 0
399 #endif
400 #define LATENCY_LIMIT 256
402 static inline bool cluster_is_empty(struct swap_cluster_info *info)
404 return info->count == 0;
407 static inline bool cluster_is_discard(struct swap_cluster_info *info)
409 return info->flags == CLUSTER_FLAG_DISCARD;
412 static inline bool cluster_is_usable(struct swap_cluster_info *ci, int order)
414 if (unlikely(ci->flags > CLUSTER_FLAG_USABLE))
415 return false;
416 if (!order)
417 return true;
418 return cluster_is_empty(ci) || order == ci->order;
421 static inline unsigned int cluster_index(struct swap_info_struct *si,
422 struct swap_cluster_info *ci)
424 return ci - si->cluster_info;
427 static inline struct swap_cluster_info *offset_to_cluster(struct swap_info_struct *si,
428 unsigned long offset)
430 return &si->cluster_info[offset / SWAPFILE_CLUSTER];
433 static inline unsigned int cluster_offset(struct swap_info_struct *si,
434 struct swap_cluster_info *ci)
436 return cluster_index(si, ci) * SWAPFILE_CLUSTER;
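/*
 * Worked example for the helpers above (illustrative): with
 * SWAPFILE_CLUSTER == 256 (!CONFIG_THP_SWAP), swap offset 1000 belongs
 * to the cluster at index 1000 / 256 == 3, i.e.
 * offset_to_cluster(si, 1000) == &si->cluster_info[3], and
 * cluster_offset() of that cluster is 3 * 256 == 768, its first slot.
 */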
439 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
440 unsigned long offset)
442 struct swap_cluster_info *ci;
444 ci = offset_to_cluster(si, offset);
445 spin_lock(&ci->lock);
447 return ci;
450 static inline void unlock_cluster(struct swap_cluster_info *ci)
452 spin_unlock(&ci->lock);
455 static void move_cluster(struct swap_info_struct *si,
456 struct swap_cluster_info *ci, struct list_head *list,
457 enum swap_cluster_flags new_flags)
459 VM_WARN_ON(ci->flags == new_flags);
461 BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX);
462 lockdep_assert_held(&ci->lock);
464 spin_lock(&si->lock);
465 if (ci->flags == CLUSTER_FLAG_NONE)
466 list_add_tail(&ci->list, list);
467 else
468 list_move_tail(&ci->list, list);
469 spin_unlock(&si->lock);
471 if (ci->flags == CLUSTER_FLAG_FRAG)
472 atomic_long_dec(&si->frag_cluster_nr[ci->order]);
473 else if (new_flags == CLUSTER_FLAG_FRAG)
474 atomic_long_inc(&si->frag_cluster_nr[ci->order]);
475 ci->flags = new_flags;
478 /* Add a cluster to discard list and schedule it to do discard */
479 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
480 struct swap_cluster_info *ci)
482 unsigned int idx = cluster_index(si, ci);
484 * If scan_swap_map_slots() can't find a free cluster, it will check
485 * si->swap_map directly. To make sure the discarding cluster isn't
486 * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
487  * They will be cleared after the discard completes.
489 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
490 SWAP_MAP_BAD, SWAPFILE_CLUSTER);
491 VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
492 move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD);
493 schedule_work(&si->discard_work);
496 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
498 lockdep_assert_held(&ci->lock);
499 move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
500 ci->order = 0;
504  * Isolate and lock the first cluster on the list that is not contended,
505  * clearing its flag as it is taken off-list. The cluster flag must stay
506  * in sync with the list status, so cluster updaters can always know the
507  * cluster's list status without touching the si lock.
509  * Note it's possible that all clusters on a list are contended, so
510  * this may return NULL for a non-empty list.
512 static struct swap_cluster_info *isolate_lock_cluster(
513 struct swap_info_struct *si, struct list_head *list)
515 struct swap_cluster_info *ci, *ret = NULL;
517 spin_lock(&si->lock);
519 if (unlikely(!(si->flags & SWP_WRITEOK)))
520 goto out;
522 list_for_each_entry(ci, list, list) {
523 if (!spin_trylock(&ci->lock))
524 continue;
526 /* We may only isolate and clear flags of following lists */
527 VM_BUG_ON(!ci->flags);
528 VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE &&
529 ci->flags != CLUSTER_FLAG_FULL);
531 list_del(&ci->list);
532 ci->flags = CLUSTER_FLAG_NONE;
533 ret = ci;
534 break;
536 out:
537 spin_unlock(&si->lock);
539 return ret;
543  * Do the discard work. After a cluster discard is finished, the cluster
544  * will be added to the free cluster list. Discard clusters are a bit special
545  * as they don't participate in allocation or reclaim, so clusters marked as
546  * CLUSTER_FLAG_DISCARD must remain off-list or on the discard list.
548 static bool swap_do_scheduled_discard(struct swap_info_struct *si)
550 struct swap_cluster_info *ci;
551 bool ret = false;
552 unsigned int idx;
554 spin_lock(&si->lock);
555 while (!list_empty(&si->discard_clusters)) {
556 ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
558  * Delete the cluster from the list to prepare for discard, but keep
559  * the CLUSTER_FLAG_DISCARD flag: a percpu_cluster could still be
560  * pointing to it, or relocate_cluster could still run into it.
562 list_del(&ci->list);
563 idx = cluster_index(si, ci);
564 spin_unlock(&si->lock);
565 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
566 SWAPFILE_CLUSTER);
568 spin_lock(&ci->lock);
570 * Discard is done, clear its flags as it's off-list, then
571 * return the cluster to allocation list.
573 ci->flags = CLUSTER_FLAG_NONE;
574 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
575 0, SWAPFILE_CLUSTER);
576 __free_cluster(si, ci);
577 spin_unlock(&ci->lock);
578 ret = true;
579 spin_lock(&si->lock);
581 spin_unlock(&si->lock);
582 return ret;
585 static void swap_discard_work(struct work_struct *work)
587 struct swap_info_struct *si;
589 si = container_of(work, struct swap_info_struct, discard_work);
591 swap_do_scheduled_discard(si);
594 static void swap_users_ref_free(struct percpu_ref *ref)
596 struct swap_info_struct *si;
598 si = container_of(ref, struct swap_info_struct, users);
599 complete(&si->comp);
603 * Must be called after freeing if ci->count == 0, moves the cluster to free
604 * or discard list.
606 static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
608 VM_BUG_ON(ci->count != 0);
609 VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
610 lockdep_assert_held(&ci->lock);
613  * If the swap is discardable, prepare to discard the cluster
614  * instead of freeing it immediately. The cluster will be freed
615  * after the discard completes.
617 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
618 (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
619 swap_cluster_schedule_discard(si, ci);
620 return;
623 __free_cluster(si, ci);
627 * Must be called after freeing if ci->count != 0, moves the cluster to
628 * nonfull list.
630 static void partial_free_cluster(struct swap_info_struct *si,
631 struct swap_cluster_info *ci)
633 VM_BUG_ON(!ci->count || ci->count == SWAPFILE_CLUSTER);
634 lockdep_assert_held(&ci->lock);
636 if (ci->flags != CLUSTER_FLAG_NONFULL)
637 move_cluster(si, ci, &si->nonfull_clusters[ci->order],
638 CLUSTER_FLAG_NONFULL);
642  * Must be called after allocation; moves the cluster to the full or frag list.
643  * Note: allocation doesn't acquire the si lock, and may drop the ci lock for
644  * reclaim, so the cluster could be anywhere when this is called.
646 static void relocate_cluster(struct swap_info_struct *si,
647 struct swap_cluster_info *ci)
649 lockdep_assert_held(&ci->lock);
651 /* Discard cluster must remain off-list or on discard list */
652 if (cluster_is_discard(ci))
653 return;
655 if (!ci->count) {
656 free_cluster(si, ci);
657 } else if (ci->count != SWAPFILE_CLUSTER) {
658 if (ci->flags != CLUSTER_FLAG_FRAG)
659 move_cluster(si, ci, &si->frag_clusters[ci->order],
660 CLUSTER_FLAG_FRAG);
661 } else {
662 if (ci->flags != CLUSTER_FLAG_FULL)
663 move_cluster(si, ci, &si->full_clusters,
664 CLUSTER_FLAG_FULL);
669 * The cluster corresponding to page_nr will be used. The cluster will not be
670 * added to free cluster list and its usage counter will be increased by 1.
671 * Only used for initialization.
673 static void inc_cluster_info_page(struct swap_info_struct *si,
674 struct swap_cluster_info *cluster_info, unsigned long page_nr)
676 unsigned long idx = page_nr / SWAPFILE_CLUSTER;
677 struct swap_cluster_info *ci;
679 ci = cluster_info + idx;
680 ci->count++;
682 VM_BUG_ON(ci->count > SWAPFILE_CLUSTER);
683 VM_BUG_ON(ci->flags);
686 static bool cluster_reclaim_range(struct swap_info_struct *si,
687 struct swap_cluster_info *ci,
688 unsigned long start, unsigned long end)
690 unsigned char *map = si->swap_map;
691 unsigned long offset = start;
692 int nr_reclaim;
694 spin_unlock(&ci->lock);
695 do {
696 switch (READ_ONCE(map[offset])) {
697 case 0:
698 offset++;
699 break;
700 case SWAP_HAS_CACHE:
701 nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
702 if (nr_reclaim > 0)
703 offset += nr_reclaim;
704 else
705 goto out;
706 break;
707 default:
708 goto out;
710 } while (offset < end);
711 out:
712 spin_lock(&ci->lock);
714  * Recheck the range whether or not reclaim succeeded; the slots
715  * could have been freed while we were not holding the lock.
717 for (offset = start; offset < end; offset++)
718 if (READ_ONCE(map[offset]))
719 return false;
721 return true;
724 static bool cluster_scan_range(struct swap_info_struct *si,
725 struct swap_cluster_info *ci,
726 unsigned long start, unsigned int nr_pages,
727 bool *need_reclaim)
729 unsigned long offset, end = start + nr_pages;
730 unsigned char *map = si->swap_map;
732 for (offset = start; offset < end; offset++) {
733 switch (READ_ONCE(map[offset])) {
734 case 0:
735 continue;
736 case SWAP_HAS_CACHE:
737 if (!vm_swap_full())
738 return false;
739 *need_reclaim = true;
740 continue;
741 default:
742 return false;
746 return true;
749 static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
750 unsigned int start, unsigned char usage,
751 unsigned int order)
753 unsigned int nr_pages = 1 << order;
755 lockdep_assert_held(&ci->lock);
757 if (!(si->flags & SWP_WRITEOK))
758 return false;
761 * The first allocation in a cluster makes the
762 * cluster exclusive to this order
764 if (cluster_is_empty(ci))
765 ci->order = order;
767 memset(si->swap_map + start, usage, nr_pages);
768 swap_range_alloc(si, nr_pages);
769 ci->count += nr_pages;
771 return true;
774 /* Try use a new cluster for current CPU and allocate from it. */
775 static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
776 struct swap_cluster_info *ci,
777 unsigned long offset,
778 unsigned int order,
779 unsigned char usage)
781 unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
782 unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER);
783 unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
784 unsigned int nr_pages = 1 << order;
785 bool need_reclaim, ret;
787 lockdep_assert_held(&ci->lock);
789 if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER)
790 goto out;
792 for (end -= nr_pages; offset <= end; offset += nr_pages) {
793 need_reclaim = false;
794 if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim))
795 continue;
796 if (need_reclaim) {
797 ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages);
799  * Reclaim drops ci->lock, and the cluster could be used
800  * by another order. Don't check the flag, as an off-list
801  * cluster has no flag set, and a change of list
802  * won't cause fragmentation.
804 if (!cluster_is_usable(ci, order))
805 goto out;
806 if (cluster_is_empty(ci))
807 offset = start;
808 /* Reclaim failed but cluster is usable, try next */
809 if (!ret)
810 continue;
812 if (!cluster_alloc_range(si, ci, offset, usage, order))
813 break;
814 found = offset;
815 offset += nr_pages;
816 if (ci->count < SWAPFILE_CLUSTER && offset <= end)
817 next = offset;
818 break;
820 out:
821 relocate_cluster(si, ci);
822 unlock_cluster(ci);
823 if (si->flags & SWP_SOLIDSTATE)
824 __this_cpu_write(si->percpu_cluster->next[order], next);
825 else
826 si->global_cluster->next[order] = next;
827 return found;
 830 /* Reclaim swap-cache-only slots from full clusters; with @force, scan all of them */
831 static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
833 long to_scan = 1;
834 unsigned long offset, end;
835 struct swap_cluster_info *ci;
836 unsigned char *map = si->swap_map;
837 int nr_reclaim;
839 if (force)
840 to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER;
842 while ((ci = isolate_lock_cluster(si, &si->full_clusters))) {
843 offset = cluster_offset(si, ci);
844 end = min(si->max, offset + SWAPFILE_CLUSTER);
845 to_scan--;
847 while (offset < end) {
848 if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
849 spin_unlock(&ci->lock);
850 nr_reclaim = __try_to_reclaim_swap(si, offset,
851 TTRS_ANYWAY | TTRS_DIRECT);
852 spin_lock(&ci->lock);
853 if (nr_reclaim) {
854 offset += abs(nr_reclaim);
855 continue;
858 offset++;
861 unlock_cluster(ci);
862 if (to_scan <= 0)
863 break;
867 static void swap_reclaim_work(struct work_struct *work)
869 struct swap_info_struct *si;
871 si = container_of(work, struct swap_info_struct, reclaim_work);
873 swap_reclaim_full_clusters(si, true);
877  * Try to get swap entries of the specified order from the current CPU's
878  * swap entry pool (a cluster). This might also involve allocating a new
879  * cluster for the current CPU.
881 static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
882 unsigned char usage)
884 struct swap_cluster_info *ci;
885 unsigned int offset, found = 0;
887 if (si->flags & SWP_SOLIDSTATE) {
888 /* Fast path using per CPU cluster */
889 local_lock(&si->percpu_cluster->lock);
890 offset = __this_cpu_read(si->percpu_cluster->next[order]);
891 } else {
892 /* Serialize HDD SWAP allocation for each device. */
893 spin_lock(&si->global_cluster_lock);
894 offset = si->global_cluster->next[order];
897 if (offset) {
898 ci = lock_cluster(si, offset);
899 /* Cluster could have been used by another order */
900 if (cluster_is_usable(ci, order)) {
901 if (cluster_is_empty(ci))
902 offset = cluster_offset(si, ci);
903 found = alloc_swap_scan_cluster(si, ci, offset,
904 order, usage);
905 } else {
906 unlock_cluster(ci);
908 if (found)
909 goto done;
912 new_cluster:
913 ci = isolate_lock_cluster(si, &si->free_clusters);
914 if (ci) {
915 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
916 order, usage);
917 if (found)
918 goto done;
921 /* Try reclaim from full clusters if free clusters list is drained */
922 if (vm_swap_full())
923 swap_reclaim_full_clusters(si, false);
925 if (order < PMD_ORDER) {
926 unsigned int frags = 0, frags_existing;
928 while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) {
929 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
930 order, usage);
931 if (found)
932 goto done;
933 /* Clusters failed to allocate are moved to frag_clusters */
934 frags++;
937 frags_existing = atomic_long_read(&si->frag_cluster_nr[order]);
938 while (frags < frags_existing &&
939 (ci = isolate_lock_cluster(si, &si->frag_clusters[order]))) {
940 atomic_long_dec(&si->frag_cluster_nr[order]);
942  * Rotate the frag list to iterate over it: these clusters all
943  * failed a high-order allocation or were moved here due to
944  * per-CPU usage, but they could contain newly released
945  * reclaimable (e.g. lazy-freed swap cache) slots.
947 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
948 order, usage);
949 if (found)
950 goto done;
951 frags++;
956  * We don't have a free cluster, but some clusters are being
957  * discarded; do the discard now and reclaim them, then
958  * retry the allocation from new_cluster.
960 if ((si->flags & SWP_PAGE_DISCARD) && swap_do_scheduled_discard(si))
961 goto new_cluster;
963 if (order)
964 goto done;
966 /* Order 0 stealing from higher order */
967 for (int o = 1; o < SWAP_NR_ORDERS; o++) {
969  * Clusters here have at least one usable slot and can't fail an order 0
970  * allocation, but reclaim may drop si->lock and race with another user.
972 while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) {
973 atomic_long_dec(&si->frag_cluster_nr[o]);
974 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
975 0, usage);
976 if (found)
977 goto done;
980 while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) {
981 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
982 0, usage);
983 if (found)
984 goto done;
987 done:
988 if (si->flags & SWP_SOLIDSTATE)
989 local_unlock(&si->percpu_cluster->lock);
990 else
991 spin_unlock(&si->global_cluster_lock);
992 return found;
995 /* SWAP_USAGE_OFFLIST_BIT can only be set by this helper. */
996 static void del_from_avail_list(struct swap_info_struct *si, bool swapoff)
998 int nid;
999 unsigned long pages;
1001 spin_lock(&swap_avail_lock);
1003 if (swapoff) {
1005 * Forcefully remove it. Clear the SWP_WRITEOK flags for
1006 * swapoff here so it's synchronized by both si->lock and
1007 * swap_avail_lock, to ensure the result can be seen by
1008 * add_to_avail_list.
1010 lockdep_assert_held(&si->lock);
1011 si->flags &= ~SWP_WRITEOK;
1012 atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
1013 } else {
1015  * If not called by swapoff, take it off-list only if it's
1016  * full and SWAP_USAGE_OFFLIST_BIT is not set (strictly
1017  * si->inuse_pages == pages); any concurrent slot freeing,
1018  * or the device having already been removed from the plist by
1019  * someone else, will make the cmpxchg below fail.
1021 pages = si->pages;
1022 if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
1023 pages | SWAP_USAGE_OFFLIST_BIT))
1024 goto skip;
1027 for_each_node(nid)
1028 plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
1030 skip:
1031 spin_unlock(&swap_avail_lock);
1034 /* SWAP_USAGE_OFFLIST_BIT can only be cleared by this helper. */
1035 static void add_to_avail_list(struct swap_info_struct *si, bool swapon)
1037 int nid;
1038 long val;
1039 unsigned long pages;
1041 spin_lock(&swap_avail_lock);
1043 /* Corresponding to SWP_WRITEOK clearing in del_from_avail_list */
1044 if (swapon) {
1045 lockdep_assert_held(&si->lock);
1046 si->flags |= SWP_WRITEOK;
1047 } else {
1048 if (!(READ_ONCE(si->flags) & SWP_WRITEOK))
1049 goto skip;
1052 if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT))
1053 goto skip;
1055 val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
1058  * When the device is full and on the plist, only one updater will
1059  * see (inuse_pages == si->pages) and will call del_from_avail_list. If
1060  * that updater happens to be here, just skip adding.
1062 pages = si->pages;
1063 if (val == pages) {
1064 /* Just like the cmpxchg in del_from_avail_list */
1065 if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
1066 pages | SWAP_USAGE_OFFLIST_BIT))
1067 goto skip;
1070 for_each_node(nid)
1071 plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
1073 skip:
1074 spin_unlock(&swap_avail_lock);
1078 * swap_usage_add / swap_usage_sub of each slot are serialized by ci->lock
1079 * within each cluster, so the total contribution to the global counter should
1080 * always be positive and cannot exceed the total number of usable slots.
1082 static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries)
1084 long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages);
1087 * If device is full, and SWAP_USAGE_OFFLIST_BIT is not set,
1088 * remove it from the plist.
1090 if (unlikely(val == si->pages)) {
1091 del_from_avail_list(si, false);
1092 return true;
1095 return false;
1098 static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries)
1100 long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages);
1103  * If the device is not full, and SWAP_USAGE_OFFLIST_BIT is set,
1104  * add it back to the plist.
1106 if (unlikely(val & SWAP_USAGE_OFFLIST_BIT))
1107 add_to_avail_list(si, false);
1110 static void swap_range_alloc(struct swap_info_struct *si,
1111 unsigned int nr_entries)
1113 if (swap_usage_add(si, nr_entries)) {
1114 if (vm_swap_full())
1115 schedule_work(&si->reclaim_work);
1119 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
1120 unsigned int nr_entries)
1122 unsigned long begin = offset;
1123 unsigned long end = offset + nr_entries - 1;
1124 void (*swap_slot_free_notify)(struct block_device *, unsigned long);
1125 unsigned int i;
1128  * Use atomic clear_bit operations only on zeromap instead of non-atomic
1129  * bitmap_clear to prevent corruption of adjacent bits due to simultaneous writes.
1131 for (i = 0; i < nr_entries; i++) {
1132 clear_bit(offset + i, si->zeromap);
1133 zswap_invalidate(swp_entry(si->type, offset + i));
1136 if (si->flags & SWP_BLKDEV)
1137 swap_slot_free_notify =
1138 si->bdev->bd_disk->fops->swap_slot_free_notify;
1139 else
1140 swap_slot_free_notify = NULL;
1141 while (offset <= end) {
1142 arch_swap_invalidate_page(si->type, offset);
1143 if (swap_slot_free_notify)
1144 swap_slot_free_notify(si->bdev, offset);
1145 offset++;
1147 clear_shadow_from_swap_cache(si->type, begin, end);
1150 * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
1151 * only after the above cleanups are done.
1153 smp_wmb();
1154 atomic_long_add(nr_entries, &nr_swap_pages);
1155 swap_usage_sub(si, nr_entries);
1158 static int cluster_alloc_swap(struct swap_info_struct *si,
1159 unsigned char usage, int nr,
1160 swp_entry_t slots[], int order)
1162 int n_ret = 0;
1164 while (n_ret < nr) {
1165 unsigned long offset = cluster_alloc_swap_entry(si, order, usage);
1167 if (!offset)
1168 break;
1169 slots[n_ret++] = swp_entry(si->type, offset);
1172 return n_ret;
1175 static int scan_swap_map_slots(struct swap_info_struct *si,
1176 unsigned char usage, int nr,
1177 swp_entry_t slots[], int order)
1179 unsigned int nr_pages = 1 << order;
1182 * We try to cluster swap pages by allocating them sequentially
1183 * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
1184 * way, however, we resort to first-free allocation, starting
1185 * a new cluster. This prevents us from scattering swap pages
1186 * all over the entire swap partition, so that we reduce
1187 * overall disk seek times between swap pages. -- sct
1188 * But we do now try to find an empty cluster. -Andrea
1189 * And we let swap pages go all over an SSD partition. Hugh
1191 if (order > 0) {
1193 * Should not even be attempting large allocations when huge
1194 * page swap is disabled. Warn and fail the allocation.
1196 if (!IS_ENABLED(CONFIG_THP_SWAP) ||
1197 nr_pages > SWAPFILE_CLUSTER) {
1198 VM_WARN_ON_ONCE(1);
1199 return 0;
1203  * The swapfile is not a block device, so we are unable
1204  * to allocate large entries.
1206 if (!(si->flags & SWP_BLKDEV))
1207 return 0;
1210 return cluster_alloc_swap(si, usage, nr, slots, order);
1213 static bool get_swap_device_info(struct swap_info_struct *si)
1215 if (!percpu_ref_tryget_live(&si->users))
1216 return false;
1218  * Guarantee that si->users is checked before accessing other
1219  * fields of swap_info_struct, and that si->flags (SWP_WRITEOK) is
1220  * up to date.
1222  * Paired with the spin_unlock() after setup_swap_info() in
1223  * enable_swap_info(), and the smp_wmb() in swapoff.
1225 smp_rmb();
1226 return true;
1229 int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order)
1231 int order = swap_entry_order(entry_order);
1232 unsigned long size = 1 << order;
1233 struct swap_info_struct *si, *next;
1234 long avail_pgs;
1235 int n_ret = 0;
1236 int node;
1238 spin_lock(&swap_avail_lock);
1240 avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1241 if (avail_pgs <= 0) {
1242 spin_unlock(&swap_avail_lock);
1243 goto noswap;
1246 n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
1248 atomic_long_sub(n_goal * size, &nr_swap_pages);
1250 start_over:
1251 node = numa_node_id();
1252 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1253 /* requeue si to after same-priority siblings */
1254 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1255 spin_unlock(&swap_avail_lock);
1256 if (get_swap_device_info(si)) {
1257 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1258 n_goal, swp_entries, order);
1259 put_swap_device(si);
1260 if (n_ret || size > 1)
1261 goto check_out;
1264 spin_lock(&swap_avail_lock);
1266 * if we got here, it's likely that si was almost full before,
1267 * and since scan_swap_map_slots() can drop the si->lock,
1268 * multiple callers probably all tried to get a page from the
1269 * same si and it filled up before we could get one; or, the si
1270 * filled up between us dropping swap_avail_lock and taking
1271 * si->lock. Since we dropped the swap_avail_lock, the
1272 * swap_avail_head list may have been modified; so if next is
1273 * still in the swap_avail_head list then try it, otherwise
1274 * start over if we have not gotten any slots.
1276 if (plist_node_empty(&next->avail_lists[node]))
1277 goto start_over;
1280 spin_unlock(&swap_avail_lock);
1282 check_out:
1283 if (n_ret < n_goal)
1284 atomic_long_add((long)(n_goal - n_ret) * size,
1285 &nr_swap_pages);
1286 noswap:
1287 return n_ret;
1290 static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1292 struct swap_info_struct *si;
1293 unsigned long offset;
1295 if (!entry.val)
1296 goto out;
1297 si = swp_swap_info(entry);
1298 if (!si)
1299 goto bad_nofile;
1300 if (data_race(!(si->flags & SWP_USED)))
1301 goto bad_device;
1302 offset = swp_offset(entry);
1303 if (offset >= si->max)
1304 goto bad_offset;
1305 if (data_race(!si->swap_map[swp_offset(entry)]))
1306 goto bad_free;
1307 return si;
1309 bad_free:
1310 pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
1311 goto out;
1312 bad_offset:
1313 pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1314 goto out;
1315 bad_device:
1316 pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
1317 goto out;
1318 bad_nofile:
1319 pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1320 out:
1321 return NULL;
1324 static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
1325 unsigned long offset,
1326 unsigned char usage)
1328 unsigned char count;
1329 unsigned char has_cache;
1331 count = si->swap_map[offset];
1333 has_cache = count & SWAP_HAS_CACHE;
1334 count &= ~SWAP_HAS_CACHE;
1336 if (usage == SWAP_HAS_CACHE) {
1337 VM_BUG_ON(!has_cache);
1338 has_cache = 0;
1339 } else if (count == SWAP_MAP_SHMEM) {
1341 * Or we could insist on shmem.c using a special
1342 * swap_shmem_free() and free_shmem_swap_and_cache()...
1344 count = 0;
1345 } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1346 if (count == COUNT_CONTINUED) {
1347 if (swap_count_continued(si, offset, count))
1348 count = SWAP_MAP_MAX | COUNT_CONTINUED;
1349 else
1350 count = SWAP_MAP_MAX;
1351 } else
1352 count--;
1355 usage = count | has_cache;
1356 if (usage)
1357 WRITE_ONCE(si->swap_map[offset], usage);
1358 else
1359 WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
1361 return usage;
1365  * When we get a swap entry, if there isn't some other way to
1366  * prevent swapoff, such as the folio in the swap cache being locked,
1367  * the RCU reader side being locked, etc., the swap entry may become
1368  * invalid because of swapoff. Then we need to enclose all swap-related
1369  * functions with get_swap_device() and put_swap_device(), unless the
1370  * swap functions call get/put_swap_device() themselves.
1372 * RCU reader side lock (including any spinlock) is sufficient to
1373 * prevent swapoff, because synchronize_rcu() is called in swapoff()
1374 * before freeing data structures.
1376 * Check whether swap entry is valid in the swap device. If so,
1377 * return pointer to swap_info_struct, and keep the swap entry valid
1378 * via preventing the swap device from being swapoff, until
1379 * put_swap_device() is called. Otherwise return NULL.
1381 * Notice that swapoff or swapoff+swapon can still happen before the
1382 * percpu_ref_tryget_live() in get_swap_device() or after the
1383 * percpu_ref_put() in put_swap_device() if there isn't any other way
1384 * to prevent swapoff. The caller must be prepared for that. For
1385 * example, the following situation is possible.
1387 * CPU1 CPU2
1388 * do_swap_page()
1389 * ... swapoff+swapon
1390 * __read_swap_cache_async()
1391 * swapcache_prepare()
1392 * __swap_duplicate()
1393 * // check swap_map
1394 * // verify PTE not changed
1396  * In __swap_duplicate(), the swap_map needs to be checked before
1397  * being changed, partly because the specified swap entry may be for
1398  * another swap device which has been swapped off. And in do_swap_page(),
1399  * after the page is read from the swap device, the PTE is verified to be
1400  * unchanged, with the page table locked, to check whether the swap device
1401  * has been swapped off or swapped off and back on.
1403 struct swap_info_struct *get_swap_device(swp_entry_t entry)
1405 struct swap_info_struct *si;
1406 unsigned long offset;
1408 if (!entry.val)
1409 goto out;
1410 si = swp_swap_info(entry);
1411 if (!si)
1412 goto bad_nofile;
1413 if (!get_swap_device_info(si))
1414 goto out;
1415 offset = swp_offset(entry);
1416 if (offset >= si->max)
1417 goto put_out;
1419 return si;
1420 bad_nofile:
1421 pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1422 out:
1423 return NULL;
1424 put_out:
1425 pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1426 percpu_ref_put(&si->users);
1427 return NULL;
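/*
 * Minimal usage sketch for the helpers above (illustrative, assuming a
 * caller that has no other protection against swapoff):
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (!si)
 *		return;		// bad entry or device going away
 *	// ... safely dereference si->swap_map[swp_offset(entry)] ...
 *	put_swap_device(si);
 *
 * As the comment above notes, a concurrent swapoff+swapon can still
 * recycle the entry, so callers must re-verify (e.g. the PTE) afterwards.
 */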
1430 static unsigned char __swap_entry_free(struct swap_info_struct *si,
1431 swp_entry_t entry)
1433 struct swap_cluster_info *ci;
1434 unsigned long offset = swp_offset(entry);
1435 unsigned char usage;
1437 ci = lock_cluster(si, offset);
1438 usage = __swap_entry_free_locked(si, offset, 1);
1439 if (!usage)
1440 swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
1441 unlock_cluster(ci);
1443 return usage;
1446 static bool __swap_entries_free(struct swap_info_struct *si,
1447 swp_entry_t entry, int nr)
1449 unsigned long offset = swp_offset(entry);
1450 unsigned int type = swp_type(entry);
1451 struct swap_cluster_info *ci;
1452 bool has_cache = false;
1453 unsigned char count;
1454 int i;
1456 if (nr <= 1 || swap_count(data_race(si->swap_map[offset])) != 1)
1457 goto fallback;
1458 /* cross into another cluster */
1459 if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
1460 goto fallback;
1462 ci = lock_cluster(si, offset);
1463 if (!swap_is_last_map(si, offset, nr, &has_cache)) {
1464 unlock_cluster(ci);
1465 goto fallback;
1467 for (i = 0; i < nr; i++)
1468 WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
1469 if (!has_cache)
1470 swap_entry_range_free(si, ci, entry, nr);
1471 unlock_cluster(ci);
1473 return has_cache;
1475 fallback:
1476 for (i = 0; i < nr; i++) {
1477 if (data_race(si->swap_map[offset + i])) {
1478 count = __swap_entry_free(si, swp_entry(type, offset + i));
1479 if (count == SWAP_HAS_CACHE)
1480 has_cache = true;
1481 } else {
1482 WARN_ON_ONCE(1);
1485 return has_cache;
1489  * Drop the last HAS_CACHE flag of swap entries; the caller has to
1490  * ensure all entries belong to the same cgroup.
1492 static void swap_entry_range_free(struct swap_info_struct *si,
1493 struct swap_cluster_info *ci,
1494 swp_entry_t entry, unsigned int nr_pages)
1496 unsigned long offset = swp_offset(entry);
1497 unsigned char *map = si->swap_map + offset;
1498 unsigned char *map_end = map + nr_pages;
1500 /* It should never free entries across different clusters */
1501 VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1));
1502 VM_BUG_ON(cluster_is_empty(ci));
1503 VM_BUG_ON(ci->count < nr_pages);
1505 ci->count -= nr_pages;
1506 do {
1507 VM_BUG_ON(*map != SWAP_HAS_CACHE);
1508 *map = 0;
1509 } while (++map < map_end);
1511 mem_cgroup_uncharge_swap(entry, nr_pages);
1512 swap_range_free(si, offset, nr_pages);
1514 if (!ci->count)
1515 free_cluster(si, ci);
1516 else
1517 partial_free_cluster(si, ci);
1520 static void cluster_swap_free_nr(struct swap_info_struct *si,
1521 unsigned long offset, int nr_pages,
1522 unsigned char usage)
1524 struct swap_cluster_info *ci;
1525 unsigned long end = offset + nr_pages;
1527 ci = lock_cluster(si, offset);
1528 do {
1529 if (!__swap_entry_free_locked(si, offset, usage))
1530 swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
1531 } while (++offset < end);
1532 unlock_cluster(ci);
1536 * Caller has made sure that the swap device corresponding to entry
1537 * is still around or has not been recycled.
1539 void swap_free_nr(swp_entry_t entry, int nr_pages)
1541 int nr;
1542 struct swap_info_struct *sis;
1543 unsigned long offset = swp_offset(entry);
1545 sis = _swap_info_get(entry);
1546 if (!sis)
1547 return;
1549 while (nr_pages) {
1550 nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
1551 cluster_swap_free_nr(sis, offset, nr, 1);
1552 offset += nr;
1553 nr_pages -= nr;
1558 * Called after dropping swapcache to decrease refcnt to swap entries.
1560 void put_swap_folio(struct folio *folio, swp_entry_t entry)
1562 unsigned long offset = swp_offset(entry);
1563 struct swap_cluster_info *ci;
1564 struct swap_info_struct *si;
1565 int size = 1 << swap_entry_order(folio_order(folio));
1567 si = _swap_info_get(entry);
1568 if (!si)
1569 return;
1571 ci = lock_cluster(si, offset);
1572 if (swap_is_has_cache(si, offset, size))
1573 swap_entry_range_free(si, ci, entry, size);
1574 else {
1575 for (int i = 0; i < size; i++, entry.val++) {
1576 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE))
1577 swap_entry_range_free(si, ci, entry, 1);
1580 unlock_cluster(ci);
1583 void swapcache_free_entries(swp_entry_t *entries, int n)
1585 int i;
1586 struct swap_cluster_info *ci;
1587 struct swap_info_struct *si = NULL;
1589 if (n <= 0)
1590 return;
1592 for (i = 0; i < n; ++i) {
1593 si = _swap_info_get(entries[i]);
1594 if (si) {
1595 ci = lock_cluster(si, swp_offset(entries[i]));
1596 swap_entry_range_free(si, ci, entries[i], 1);
1597 unlock_cluster(ci);
1602 int __swap_count(swp_entry_t entry)
1604 struct swap_info_struct *si = swp_swap_info(entry);
1605 pgoff_t offset = swp_offset(entry);
1607 return swap_count(si->swap_map[offset]);
1611 * How many references to @entry are currently swapped out?
1612 * This does not give an exact answer when swap count is continued,
1613 * but does include the high COUNT_CONTINUED flag to allow for that.
1615 int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1617 pgoff_t offset = swp_offset(entry);
1618 struct swap_cluster_info *ci;
1619 int count;
1621 ci = lock_cluster(si, offset);
1622 count = swap_count(si->swap_map[offset]);
1623 unlock_cluster(ci);
1624 return count;
1628 * How many references to @entry are currently swapped out?
1629 * This considers COUNT_CONTINUED so it returns exact answer.
1631 int swp_swapcount(swp_entry_t entry)
1633 int count, tmp_count, n;
1634 struct swap_info_struct *si;
1635 struct swap_cluster_info *ci;
1636 struct page *page;
1637 pgoff_t offset;
1638 unsigned char *map;
1640 si = _swap_info_get(entry);
1641 if (!si)
1642 return 0;
1644 offset = swp_offset(entry);
1646 ci = lock_cluster(si, offset);
1648 count = swap_count(si->swap_map[offset]);
1649 if (!(count & COUNT_CONTINUED))
1650 goto out;
1652 count &= ~COUNT_CONTINUED;
1653 n = SWAP_MAP_MAX + 1;
1655 page = vmalloc_to_page(si->swap_map + offset);
1656 offset &= ~PAGE_MASK;
1657 VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1659 do {
1660 page = list_next_entry(page, lru);
1661 map = kmap_local_page(page);
1662 tmp_count = map[offset];
1663 kunmap_local(map);
1665 count += (tmp_count & ~COUNT_CONTINUED) * n;
1666 n *= (SWAP_CONT_MAX + 1);
1667 } while (tmp_count & COUNT_CONTINUED);
1668 out:
1669 unlock_cluster(ci);
1670 return count;
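/*
 * Worked example of the continuation arithmetic above (illustrative):
 * SWAP_MAP_MAX is 0x3e (62) and SWAP_CONT_MAX is 0x7f (127). If the
 * first map byte holds 62 with COUNT_CONTINUED set and the first
 * continuation byte holds 2, the exact count is 62 + 2 * 63 == 188.
 * A second continuation level would contribute in units of 63 * 128.
 */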
1673 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1674 swp_entry_t entry, int order)
1676 struct swap_cluster_info *ci;
1677 unsigned char *map = si->swap_map;
1678 unsigned int nr_pages = 1 << order;
1679 unsigned long roffset = swp_offset(entry);
1680 unsigned long offset = round_down(roffset, nr_pages);
1681 int i;
1682 bool ret = false;
1684 ci = lock_cluster(si, offset);
1685 if (nr_pages == 1) {
1686 if (swap_count(map[roffset]))
1687 ret = true;
1688 goto unlock_out;
1690 for (i = 0; i < nr_pages; i++) {
1691 if (swap_count(map[offset + i])) {
1692 ret = true;
1693 break;
1696 unlock_out:
1697 unlock_cluster(ci);
1698 return ret;
1701 static bool folio_swapped(struct folio *folio)
1703 swp_entry_t entry = folio->swap;
1704 struct swap_info_struct *si = _swap_info_get(entry);
1706 if (!si)
1707 return false;
1709 if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
1710 return swap_swapcount(si, entry) != 0;
1712 return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
1715 static bool folio_swapcache_freeable(struct folio *folio)
1717 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1719 if (!folio_test_swapcache(folio))
1720 return false;
1721 if (folio_test_writeback(folio))
1722 return false;
1725 * Once hibernation has begun to create its image of memory,
1726 * there's a danger that one of the calls to folio_free_swap()
1727 * - most probably a call from __try_to_reclaim_swap() while
1728 * hibernation is allocating its own swap pages for the image,
1729 * but conceivably even a call from memory reclaim - will free
1730 * the swap from a folio which has already been recorded in the
1731 * image as a clean swapcache folio, and then reuse its swap for
1732 * another page of the image. On waking from hibernation, the
1733 * original folio might be freed under memory pressure, then
1734 * later read back in from swap, now with the wrong data.
1736 * Hibernation suspends storage while it is writing the image
1737 * to disk so check that here.
1739 if (pm_suspended_storage())
1740 return false;
1742 return true;
1746 * folio_free_swap() - Free the swap space used for this folio.
1747 * @folio: The folio to remove.
1749 * If swap is getting full, or if there are no more mappings of this folio,
1750 * then call folio_free_swap to free its swap space.
1752 * Return: true if we were able to release the swap space.
1754 bool folio_free_swap(struct folio *folio)
1756 if (!folio_swapcache_freeable(folio))
1757 return false;
1758 if (folio_swapped(folio))
1759 return false;
1761 delete_from_swap_cache(folio);
1762 folio_set_dirty(folio);
1763 return true;
1767 * free_swap_and_cache_nr() - Release reference on range of swap entries and
1768 * reclaim their cache if no more references remain.
1769 * @entry: First entry of range.
1770 * @nr: Number of entries in range.
1772 * For each swap entry in the contiguous range, release a reference. If any swap
1773 * entries become free, try to reclaim their underlying folios, if present. The
1774 * offset range is defined by [entry.offset, entry.offset + nr).
1776 void free_swap_and_cache_nr(swp_entry_t entry, int nr)
1778 const unsigned long start_offset = swp_offset(entry);
1779 const unsigned long end_offset = start_offset + nr;
1780 struct swap_info_struct *si;
1781 bool any_only_cache = false;
1782 unsigned long offset;
1784 if (non_swap_entry(entry))
1785 return;
1787 si = get_swap_device(entry);
1788 if (!si)
1789 return;
1791 if (WARN_ON(end_offset > si->max))
1792 goto out;
1795 * First free all entries in the range.
1797 any_only_cache = __swap_entries_free(si, entry, nr);
1800 * Short-circuit the below loop if none of the entries had their
1801 * reference drop to zero.
1803 if (!any_only_cache)
1804 goto out;
1807 * Now go back over the range trying to reclaim the swap cache. This is
1808 * more efficient for large folios because we will only try to reclaim
1809 * the swap once per folio in the common case. If we do
1810 * __swap_entry_free() and __try_to_reclaim_swap() in the same loop, the
1811 * latter will get a reference and lock the folio for every individual
1812 * page but will only succeed once the swap slot for every subpage is
1813 * zero.
1815 for (offset = start_offset; offset < end_offset; offset += nr) {
1816 nr = 1;
1817 if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
1819 * Folios are always naturally aligned in swap so
1820 * advance forward to the next boundary. Zero means no
1821 * folio was found for the swap entry, so advance by 1
1822 * in this case. Negative value means folio was found
1823 * but could not be reclaimed. Here we can still advance
1824 * to the next boundary.
1826 nr = __try_to_reclaim_swap(si, offset,
1827 TTRS_UNMAPPED | TTRS_FULL);
1828 if (nr == 0)
1829 nr = 1;
1830 else if (nr < 0)
1831 nr = -nr;
1832 nr = ALIGN(offset + 1, nr) - offset;
1836 out:
1837 put_swap_device(si);
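/*
 * Worked example of the advance logic above (illustrative): if reclaim
 * at offset 5 finds a folio of 4 pages, nr becomes 4 and
 * ALIGN(5 + 1, 4) - 5 == 3, so the loop continues at offset 8, the
 * next naturally aligned folio boundary; a return of 0 advances by 1.
 */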
1840 #ifdef CONFIG_HIBERNATION
1842 swp_entry_t get_swap_page_of_type(int type)
1844 struct swap_info_struct *si = swap_type_to_swap_info(type);
1845 swp_entry_t entry = {0};
1847 if (!si)
1848 goto fail;
1850 /* This is called for allocating swap entry, not cache */
1851 if (get_swap_device_info(si)) {
1852 if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0))
1853 atomic_long_dec(&nr_swap_pages);
1854 put_swap_device(si);
1856 fail:
1857 return entry;
1861  * Find the swap type that corresponds to the given device (if any).
1863 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1864 * from 0, in which the swap header is expected to be located.
1866 * This is needed for the suspend to disk (aka swsusp).
1868 int swap_type_of(dev_t device, sector_t offset)
1870 int type;
1872 if (!device)
1873 return -1;
1875 spin_lock(&swap_lock);
1876 for (type = 0; type < nr_swapfiles; type++) {
1877 struct swap_info_struct *sis = swap_info[type];
1879 if (!(sis->flags & SWP_WRITEOK))
1880 continue;
1882 if (device == sis->bdev->bd_dev) {
1883 struct swap_extent *se = first_se(sis);
1885 if (se->start_block == offset) {
1886 spin_unlock(&swap_lock);
1887 return type;
1891 spin_unlock(&swap_lock);
1892 return -ENODEV;
1895 int find_first_swap(dev_t *device)
1897 int type;
1899 spin_lock(&swap_lock);
1900 for (type = 0; type < nr_swapfiles; type++) {
1901 struct swap_info_struct *sis = swap_info[type];
1903 if (!(sis->flags & SWP_WRITEOK))
1904 continue;
1905 *device = sis->bdev->bd_dev;
1906 spin_unlock(&swap_lock);
1907 return type;
1909 spin_unlock(&swap_lock);
1910 return -ENODEV;
1914 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1915 * corresponding to given index in swap_info (swap type).
1917 sector_t swapdev_block(int type, pgoff_t offset)
1919 struct swap_info_struct *si = swap_type_to_swap_info(type);
1920 struct swap_extent *se;
1922 if (!si || !(si->flags & SWP_WRITEOK))
1923 return 0;
1924 se = offset_to_swap_extent(si, offset);
1925 return se->start_block + (offset - se->start_page);
1929 * Return either the total number of swap pages of given type, or the number
1930 * of free pages of that type (depending on @free)
1932 * This is needed for software suspend
1934 unsigned int count_swap_pages(int type, int free)
1936 unsigned int n = 0;
1938 spin_lock(&swap_lock);
1939 if ((unsigned int)type < nr_swapfiles) {
1940 struct swap_info_struct *sis = swap_info[type];
1942 spin_lock(&sis->lock);
1943 if (sis->flags & SWP_WRITEOK) {
1944 n = sis->pages;
1945 if (free)
1946 n -= swap_usage_in_pages(sis);
1948 spin_unlock(&sis->lock);
1950 spin_unlock(&swap_lock);
1951 return n;
1953 #endif /* CONFIG_HIBERNATION */
1955 static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1957 return pte_same(pte_swp_clear_flags(pte), swp_pte);
1961 * No need to decide whether this PTE shares the swap entry with others,
1962 * just let do_wp_page work it out if a write is requested later - to
1963 * force COW, vm_page_prot omits write permission from any private vma.
1965 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1966 unsigned long addr, swp_entry_t entry, struct folio *folio)
1968 struct page *page;
1969 struct folio *swapcache;
1970 spinlock_t *ptl;
1971 pte_t *pte, new_pte, old_pte;
1972 bool hwpoisoned = false;
1973 int ret = 1;
1975 swapcache = folio;
1976 folio = ksm_might_need_to_copy(folio, vma, addr);
1977 if (unlikely(!folio))
1978 return -ENOMEM;
1979 else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
1980 hwpoisoned = true;
1981 folio = swapcache;
1984 page = folio_file_page(folio, swp_offset(entry));
1985 if (PageHWPoison(page))
1986 hwpoisoned = true;
1988 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1989 if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
1990 swp_entry_to_pte(entry)))) {
1991 ret = 0;
1992 goto out;
1995 old_pte = ptep_get(pte);
1997 if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
1998 swp_entry_t swp_entry;
2000 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
2001 if (hwpoisoned) {
2002 swp_entry = make_hwpoison_entry(page);
2003 } else {
2004 swp_entry = make_poisoned_swp_entry();
2006 new_pte = swp_entry_to_pte(swp_entry);
2007 ret = 0;
2008 goto setpte;
2012 * Some architectures may have to restore extra metadata to the page
2013 * when reading from swap. This metadata may be indexed by swap entry
2014 * so this must be called before swap_free().
2016 arch_swap_restore(folio_swap(entry, folio), folio);
2018 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
2019 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
2020 folio_get(folio);
2021 if (folio == swapcache) {
2022 rmap_t rmap_flags = RMAP_NONE;
2025 * See do_swap_page(): writeback would be problematic.
2026 * However, we do a folio_wait_writeback() just before this
2027 * call and have the folio locked.
2029 VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
2030 if (pte_swp_exclusive(old_pte))
2031 rmap_flags |= RMAP_EXCLUSIVE;
2033 * We currently only expect small !anon folios, which are either
2034 * fully exclusive or fully shared. If we ever get large folios
2035 * here, we have to be careful.
2037 if (!folio_test_anon(folio)) {
2038 VM_WARN_ON_ONCE(folio_test_large(folio));
2039 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
2040 folio_add_new_anon_rmap(folio, vma, addr, rmap_flags);
2041 } else {
2042 folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
2044 } else { /* ksm created a completely new copy */
2045 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
2046 folio_add_lru_vma(folio, vma);
2048 new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
2049 if (pte_swp_soft_dirty(old_pte))
2050 new_pte = pte_mksoft_dirty(new_pte);
2051 if (pte_swp_uffd_wp(old_pte))
2052 new_pte = pte_mkuffd_wp(new_pte);
2053 setpte:
2054 set_pte_at(vma->vm_mm, addr, pte, new_pte);
2055 swap_free(entry);
2056 out:
2057 if (pte)
2058 pte_unmap_unlock(pte, ptl);
2059 if (folio != swapcache) {
2060 folio_unlock(folio);
2061 folio_put(folio);
2063 return ret;
2066 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
2067 unsigned long addr, unsigned long end,
2068 unsigned int type)
2070 pte_t *pte = NULL;
2071 struct swap_info_struct *si;
2073 si = swap_info[type];
2074 do {
2075 struct folio *folio;
2076 unsigned long offset;
2077 unsigned char swp_count;
2078 swp_entry_t entry;
2079 int ret;
2080 pte_t ptent;
2082 if (!pte++) {
2083 pte = pte_offset_map(pmd, addr);
2084 if (!pte)
2085 break;
2088 ptent = ptep_get_lockless(pte);
2090 if (!is_swap_pte(ptent))
2091 continue;
2093 entry = pte_to_swp_entry(ptent);
2094 if (swp_type(entry) != type)
2095 continue;
2097 offset = swp_offset(entry);
2098 pte_unmap(pte);
2099 pte = NULL;
2101 folio = swap_cache_get_folio(entry, vma, addr);
2102 if (!folio) {
2103 struct vm_fault vmf = {
2104 .vma = vma,
2105 .address = addr,
2106 .real_address = addr,
2107 .pmd = pmd,
2110 folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
2111 &vmf);
2113 if (!folio) {
2114 swp_count = READ_ONCE(si->swap_map[offset]);
2115 if (swp_count == 0 || swp_count == SWAP_MAP_BAD)
2116 continue;
2117 return -ENOMEM;
2120 folio_lock(folio);
2121 folio_wait_writeback(folio);
2122 ret = unuse_pte(vma, pmd, addr, entry, folio);
2123 if (ret < 0) {
2124 folio_unlock(folio);
2125 folio_put(folio);
2126 return ret;
2129 folio_free_swap(folio);
2130 folio_unlock(folio);
2131 folio_put(folio);
2132 } while (addr += PAGE_SIZE, addr != end);
2134 if (pte)
2135 pte_unmap(pte);
2136 return 0;
2139 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
2140 unsigned long addr, unsigned long end,
2141 unsigned int type)
2143 pmd_t *pmd;
2144 unsigned long next;
2145 int ret;
2147 pmd = pmd_offset(pud, addr);
2148 do {
2149 cond_resched();
2150 next = pmd_addr_end(addr, end);
2151 ret = unuse_pte_range(vma, pmd, addr, next, type);
2152 if (ret)
2153 return ret;
2154 } while (pmd++, addr = next, addr != end);
2155 return 0;
2158 static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2159 unsigned long addr, unsigned long end,
2160 unsigned int type)
2162 pud_t *pud;
2163 unsigned long next;
2164 int ret;
2166 pud = pud_offset(p4d, addr);
2167 do {
2168 next = pud_addr_end(addr, end);
2169 if (pud_none_or_clear_bad(pud))
2170 continue;
2171 ret = unuse_pmd_range(vma, pud, addr, next, type);
2172 if (ret)
2173 return ret;
2174 } while (pud++, addr = next, addr != end);
2175 return 0;
2178 static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2179 unsigned long addr, unsigned long end,
2180 unsigned int type)
2182 p4d_t *p4d;
2183 unsigned long next;
2184 int ret;
2186 p4d = p4d_offset(pgd, addr);
2187 do {
2188 next = p4d_addr_end(addr, end);
2189 if (p4d_none_or_clear_bad(p4d))
2190 continue;
2191 ret = unuse_pud_range(vma, p4d, addr, next, type);
2192 if (ret)
2193 return ret;
2194 } while (p4d++, addr = next, addr != end);
2195 return 0;
2198 static int unuse_vma(struct vm_area_struct *vma, unsigned int type)
2200 pgd_t *pgd;
2201 unsigned long addr, end, next;
2202 int ret;
2204 addr = vma->vm_start;
2205 end = vma->vm_end;
2207 pgd = pgd_offset(vma->vm_mm, addr);
2208 do {
2209 next = pgd_addr_end(addr, end);
2210 if (pgd_none_or_clear_bad(pgd))
2211 continue;
2212 ret = unuse_p4d_range(vma, pgd, addr, next, type);
2213 if (ret)
2214 return ret;
2215 } while (pgd++, addr = next, addr != end);
2216 return 0;
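/*
 * Editor's note (illustrative, not part of the upstream code): unuse_vma()
 * and the helpers above form the usual top-down page table walk,
 * pgd -> p4d -> pud -> pmd -> pte, each upper level iterating with its
 * *_addr_end() helper and skipping empty entries with *_none_or_clear_bad()
 * where applicable.  Only the pte level (unuse_pte_range()) does real work:
 * it finds swap ptes of the type being swapped off, reads the folio back in
 * and restores a present pte.
 */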
2219 static int unuse_mm(struct mm_struct *mm, unsigned int type)
2221 struct vm_area_struct *vma;
2222 int ret = 0;
2223 VMA_ITERATOR(vmi, mm, 0);
2225 mmap_read_lock(mm);
2226 for_each_vma(vmi, vma) {
2227 if (vma->anon_vma && !is_vm_hugetlb_page(vma)) {
2228 ret = unuse_vma(vma, type);
2229 if (ret)
2230 break;
2233 cond_resched();
2235 mmap_read_unlock(mm);
2236 return ret;
2240 * Scan swap_map from current position to next entry still in use.
2241 * Return 0 if there are no inuse entries after prev till end of
2242 * the map.
2244 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2245 unsigned int prev)
2247 unsigned int i;
2248 unsigned char count;
2251 * No need for swap_lock here: we're just looking
2252 * for whether an entry is in use, not modifying it; false
2253 * hits are okay, and sys_swapoff() has already prevented new
2254 * allocations from this area (while holding swap_lock).
2256 for (i = prev + 1; i < si->max; i++) {
2257 count = READ_ONCE(si->swap_map[i]);
2258 if (count && swap_count(count) != SWAP_MAP_BAD)
2259 break;
2260 if ((i % LATENCY_LIMIT) == 0)
2261 cond_resched();
2264 if (i == si->max)
2265 i = 0;
2267 return i;
2270 static int try_to_unuse(unsigned int type)
2272 struct mm_struct *prev_mm;
2273 struct mm_struct *mm;
2274 struct list_head *p;
2275 int retval = 0;
2276 struct swap_info_struct *si = swap_info[type];
2277 struct folio *folio;
2278 swp_entry_t entry;
2279 unsigned int i;
2281 if (!swap_usage_in_pages(si))
2282 goto success;
2284 retry:
2285 retval = shmem_unuse(type);
2286 if (retval)
2287 return retval;
2289 prev_mm = &init_mm;
2290 mmget(prev_mm);
2292 spin_lock(&mmlist_lock);
2293 p = &init_mm.mmlist;
2294 while (swap_usage_in_pages(si) &&
2295 !signal_pending(current) &&
2296 (p = p->next) != &init_mm.mmlist) {
2298 mm = list_entry(p, struct mm_struct, mmlist);
2299 if (!mmget_not_zero(mm))
2300 continue;
2301 spin_unlock(&mmlist_lock);
2302 mmput(prev_mm);
2303 prev_mm = mm;
2304 retval = unuse_mm(mm, type);
2305 if (retval) {
2306 mmput(prev_mm);
2307 return retval;
2311 * Make sure that we aren't completely killing
2312 * interactive performance.
2314 cond_resched();
2315 spin_lock(&mmlist_lock);
2317 spin_unlock(&mmlist_lock);
2319 mmput(prev_mm);
2321 i = 0;
2322 while (swap_usage_in_pages(si) &&
2323 !signal_pending(current) &&
2324 (i = find_next_to_unuse(si, i)) != 0) {
2326 entry = swp_entry(type, i);
2327 folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
2328 if (IS_ERR(folio))
2329 continue;
2332 * It is conceivable that a racing task removed this folio from
2333 * swap cache just before we acquired the page lock. The folio
2334 * might even be back in swap cache on another swap area. But
2335 * that is okay, folio_free_swap() only removes stale folios.
2337 folio_lock(folio);
2338 folio_wait_writeback(folio);
2339 folio_free_swap(folio);
2340 folio_unlock(folio);
2341 folio_put(folio);
2345 * Let's check again to see if there are still swap entries in the map.
2346 * If yes, we would need to retry the unuse logic again.
2347 * Under global memory pressure, swap entries can be reinserted back
2348 * into process space after the mmlist loop above passes over them.
2350 * Limit the number of retries? No: when mmget_not_zero()
2351 * above fails, that mm is likely to be freeing swap from
2352 * exit_mmap(), which proceeds at its own independent pace;
2353 * and even shmem_writepage() could have been preempted after
2354 * folio_alloc_swap(), temporarily hiding that swap. It's easy
2355 * and robust (though cpu-intensive) just to keep retrying.
2357 if (swap_usage_in_pages(si)) {
2358 if (!signal_pending(current))
2359 goto retry;
2360 return -EINTR;
2363 success:
2365 * Make sure that further cleanups after try_to_unuse() returns happen
2366 * after swap_range_free() reduces si->inuse_pages to 0.
2368 smp_mb();
2369 return 0;
2373 * After a successful try_to_unuse, if no swap is now in use, we know
2374 * we can empty the mmlist. swap_lock must be held on entry and exit.
2375 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2376 * added to the mmlist just after swap_duplicate() - before would be racy.
2378 static void drain_mmlist(void)
2380 struct list_head *p, *next;
2381 unsigned int type;
2383 for (type = 0; type < nr_swapfiles; type++)
2384 if (swap_usage_in_pages(swap_info[type]))
2385 return;
2386 spin_lock(&mmlist_lock);
2387 list_for_each_safe(p, next, &init_mm.mmlist)
2388 list_del_init(p);
2389 spin_unlock(&mmlist_lock);
2393 * Free all of a swapdev's extent information
2395 static void destroy_swap_extents(struct swap_info_struct *sis)
2397 while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2398 struct rb_node *rb = sis->swap_extent_root.rb_node;
2399 struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2401 rb_erase(rb, &sis->swap_extent_root);
2402 kfree(se);
2405 if (sis->flags & SWP_ACTIVATED) {
2406 struct file *swap_file = sis->swap_file;
2407 struct address_space *mapping = swap_file->f_mapping;
2409 sis->flags &= ~SWP_ACTIVATED;
2410 if (mapping->a_ops->swap_deactivate)
2411 mapping->a_ops->swap_deactivate(swap_file);
2416 * Add a block range (and the corresponding page range) into this swapdev's
2417 * extent tree.
2419 * This function rather assumes that it is called in ascending page order.
2422 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2423 unsigned long nr_pages, sector_t start_block)
2425 struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2426 struct swap_extent *se;
2427 struct swap_extent *new_se;
2430 * place the new node at the rightmost position since the
2431 * function is called in ascending page order.
2433 while (*link) {
2434 parent = *link;
2435 link = &parent->rb_right;
2438 if (parent) {
2439 se = rb_entry(parent, struct swap_extent, rb_node);
2440 BUG_ON(se->start_page + se->nr_pages != start_page);
2441 if (se->start_block + se->nr_pages == start_block) {
2442 /* Merge it */
2443 se->nr_pages += nr_pages;
2444 return 0;
2448 /* No merge, insert a new extent. */
2449 new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2450 if (new_se == NULL)
2451 return -ENOMEM;
2452 new_se->start_page = start_page;
2453 new_se->nr_pages = nr_pages;
2454 new_se->start_block = start_block;
2456 rb_link_node(&new_se->rb_node, parent, link);
2457 rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2458 return 1;
2460 EXPORT_SYMBOL_GPL(add_swap_extent);
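/*
 * Illustrative example (editor's note, not part of the upstream code),
 * assuming the calls arrive in ascending page order as documented above:
 *
 *	add_swap_extent(sis, 0,   64, 1000);  -> returns 1 (new extent)
 *	add_swap_extent(sis, 64,  64, 1064);  -> returns 0 (1000 + 64 == 1064,
 *						 contiguous on disk, merged,
 *						 nr_pages becomes 128)
 *	add_swap_extent(sis, 128, 64, 5000);  -> returns 1 (not contiguous on
 *						 disk, a second extent is added)
 *
 * The return value counts newly inserted rbtree nodes, which callers such
 * as setup_swap_extents() use to report the number of extents at swapon
 * time.
 */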
2463 * A `swap extent' is a simple thing which maps a contiguous range of pages
2464 * onto a contiguous range of disk blocks. An rbtree of swap extents is
2465 * built at swapon time and is then used at swap_writepage/swap_read_folio
2466 * time for locating where on disk a page belongs.
2468 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2469 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2470 * swap files identically.
2472 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2473 * extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
2474 * swapfiles are handled *identically* after swapon time.
2476 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2477 * and will parse them into a rbtree, in PAGE_SIZE chunks. If some stray
2478 * blocks are found which do not fall within the PAGE_SIZE alignment
2479 * requirements, they are simply tossed out - we will never use those blocks
2480 * for swapping.
2482 * For all swap devices we set S_SWAPFILE across the life of the swapon. This
2483 * prevents users from writing to the swap device, which will corrupt memory.
2485 * The amount of disk space which a single swap extent represents varies.
2486 * Typically it is in the 1-4 megabyte range. So we can have hundreds of
2487 * extents in the rbtree. - akpm.
2489 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2491 struct file *swap_file = sis->swap_file;
2492 struct address_space *mapping = swap_file->f_mapping;
2493 struct inode *inode = mapping->host;
2494 int ret;
2496 if (S_ISBLK(inode->i_mode)) {
2497 ret = add_swap_extent(sis, 0, sis->max, 0);
2498 *span = sis->pages;
2499 return ret;
2502 if (mapping->a_ops->swap_activate) {
2503 ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2504 if (ret < 0)
2505 return ret;
2506 sis->flags |= SWP_ACTIVATED;
2507 if ((sis->flags & SWP_FS_OPS) &&
2508 sio_pool_init() != 0) {
2509 destroy_swap_extents(sis);
2510 return -ENOMEM;
2512 return ret;
2515 return generic_swapfile_activate(sis, swap_file, span);
2518 static int swap_node(struct swap_info_struct *si)
2520 struct block_device *bdev;
2522 if (si->bdev)
2523 bdev = si->bdev;
2524 else
2525 bdev = si->swap_file->f_inode->i_sb->s_bdev;
2527 return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2530 static void setup_swap_info(struct swap_info_struct *si, int prio,
2531 unsigned char *swap_map,
2532 struct swap_cluster_info *cluster_info,
2533 unsigned long *zeromap)
2535 int i;
2537 if (prio >= 0)
2538 si->prio = prio;
2539 else
2540 si->prio = --least_priority;
2542 * the plist prio is negated because plist ordering is
2543 * low-to-high, while swap ordering is high-to-low; see the worked example after this function
2545 si->list.prio = -si->prio;
2546 for_each_node(i) {
2547 if (si->prio >= 0)
2548 si->avail_lists[i].prio = -si->prio;
2549 else {
2550 if (swap_node(si) == i)
2551 si->avail_lists[i].prio = 1;
2552 else
2553 si->avail_lists[i].prio = -si->prio;
2556 si->swap_map = swap_map;
2557 si->cluster_info = cluster_info;
2558 si->zeromap = zeromap;
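/*
 * Worked example (editor's note, not part of the upstream code): plists
 * sort by ascending key, so the higher-priority device must get the lower
 * key.  A device added with "swapon -p 5" gets si->prio == 5 and
 * list.prio == -5; an auto-assigned device gets e.g. si->prio == -2 and
 * list.prio == 2, so the explicitly prioritised device always sorts first.
 * For auto-assigned (negative) priorities the per-node avail_lists key is
 * forced to 1 on the device's own NUMA node, which makes each node prefer
 * its local device over other auto-priority devices.
 */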
2561 static void _enable_swap_info(struct swap_info_struct *si)
2563 atomic_long_add(si->pages, &nr_swap_pages);
2564 total_swap_pages += si->pages;
2566 assert_spin_locked(&swap_lock);
2568 * both lists are plists, and thus priority ordered.
2569 * swap_active_head needs to be priority ordered for swapoff(),
2570 * which on removal of any swap_info_struct with an auto-assigned
2571 * (i.e. negative) priority increments the auto-assigned priority
2572 * of any lower-priority swap_info_structs.
2573 * swap_avail_head needs to be priority ordered for folio_alloc_swap(),
2574 * which allocates swap pages from the highest available priority
2575 * swap_info_struct.
2577 plist_add(&si->list, &swap_active_head);
2579 /* Add back to available list */
2580 add_to_avail_list(si, true);
2583 static void enable_swap_info(struct swap_info_struct *si, int prio,
2584 unsigned char *swap_map,
2585 struct swap_cluster_info *cluster_info,
2586 unsigned long *zeromap)
2588 spin_lock(&swap_lock);
2589 spin_lock(&si->lock);
2590 setup_swap_info(si, prio, swap_map, cluster_info, zeromap);
2591 spin_unlock(&si->lock);
2592 spin_unlock(&swap_lock);
2594 * Finished initializing swap device, now it's safe to reference it.
2596 percpu_ref_resurrect(&si->users);
2597 spin_lock(&swap_lock);
2598 spin_lock(&si->lock);
2599 _enable_swap_info(si);
2600 spin_unlock(&si->lock);
2601 spin_unlock(&swap_lock);
2604 static void reinsert_swap_info(struct swap_info_struct *si)
2606 spin_lock(&swap_lock);
2607 spin_lock(&si->lock);
2608 setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap);
2609 _enable_swap_info(si);
2610 spin_unlock(&si->lock);
2611 spin_unlock(&swap_lock);
2614 static bool __has_usable_swap(void)
2616 return !plist_head_empty(&swap_active_head);
2619 bool has_usable_swap(void)
2621 bool ret;
2623 spin_lock(&swap_lock);
2624 ret = __has_usable_swap();
2625 spin_unlock(&swap_lock);
2626 return ret;
2630 * Called after clearing SWP_WRITEOK, ensures cluster_alloc_range()
2631 * sees the updated flags, so there will be no more allocations.
2633 static void wait_for_allocation(struct swap_info_struct *si)
2635 unsigned long offset;
2636 unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER);
2637 struct swap_cluster_info *ci;
2639 BUG_ON(si->flags & SWP_WRITEOK);
2641 for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) {
2642 ci = lock_cluster(si, offset);
2643 unlock_cluster(ci);
2648 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2650 struct swap_info_struct *p = NULL;
2651 unsigned char *swap_map;
2652 unsigned long *zeromap;
2653 struct swap_cluster_info *cluster_info;
2654 struct file *swap_file, *victim;
2655 struct address_space *mapping;
2656 struct inode *inode;
2657 struct filename *pathname;
2658 int err, found = 0;
2660 if (!capable(CAP_SYS_ADMIN))
2661 return -EPERM;
2663 BUG_ON(!current->mm);
2665 pathname = getname(specialfile);
2666 if (IS_ERR(pathname))
2667 return PTR_ERR(pathname);
2669 victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2670 err = PTR_ERR(victim);
2671 if (IS_ERR(victim))
2672 goto out;
2674 mapping = victim->f_mapping;
2675 spin_lock(&swap_lock);
2676 plist_for_each_entry(p, &swap_active_head, list) {
2677 if (p->flags & SWP_WRITEOK) {
2678 if (p->swap_file->f_mapping == mapping) {
2679 found = 1;
2680 break;
2684 if (!found) {
2685 err = -EINVAL;
2686 spin_unlock(&swap_lock);
2687 goto out_dput;
2689 if (!security_vm_enough_memory_mm(current->mm, p->pages))
2690 vm_unacct_memory(p->pages);
2691 else {
2692 err = -ENOMEM;
2693 spin_unlock(&swap_lock);
2694 goto out_dput;
2696 spin_lock(&p->lock);
2697 del_from_avail_list(p, true);
2698 if (p->prio < 0) {
2699 struct swap_info_struct *si = p;
2700 int nid;
2702 plist_for_each_entry_continue(si, &swap_active_head, list) {
2703 si->prio++;
2704 si->list.prio--;
2705 for_each_node(nid) {
2706 if (si->avail_lists[nid].prio != 1)
2707 si->avail_lists[nid].prio--;
2710 least_priority++;
2712 plist_del(&p->list, &swap_active_head);
2713 atomic_long_sub(p->pages, &nr_swap_pages);
2714 total_swap_pages -= p->pages;
2715 spin_unlock(&p->lock);
2716 spin_unlock(&swap_lock);
2718 wait_for_allocation(p);
2720 disable_swap_slots_cache_lock();
2722 set_current_oom_origin();
2723 err = try_to_unuse(p->type);
2724 clear_current_oom_origin();
2726 if (err) {
2727 /* re-insert swap space back into swap_list */
2728 reinsert_swap_info(p);
2729 reenable_swap_slots_cache_unlock();
2730 goto out_dput;
2733 reenable_swap_slots_cache_unlock();
2736 * Wait for swap operations protected by get/put_swap_device()
2737 * to complete. Because of synchronize_rcu() here, all swap
2738 * operations protected by RCU reader side lock (including any
2739 * spinlock) will be waited for too. This makes it easy to
2740 * prevent folio_test_swapcache() and the following swap cache
2741 * operations from racing with swapoff.
2743 percpu_ref_kill(&p->users);
2744 synchronize_rcu();
2745 wait_for_completion(&p->comp);
2747 flush_work(&p->discard_work);
2748 flush_work(&p->reclaim_work);
2750 destroy_swap_extents(p);
2751 if (p->flags & SWP_CONTINUED)
2752 free_swap_count_continuations(p);
2754 if (!p->bdev || !bdev_nonrot(p->bdev))
2755 atomic_dec(&nr_rotate_swap);
2757 mutex_lock(&swapon_mutex);
2758 spin_lock(&swap_lock);
2759 spin_lock(&p->lock);
2760 drain_mmlist();
2762 swap_file = p->swap_file;
2763 p->swap_file = NULL;
2764 p->max = 0;
2765 swap_map = p->swap_map;
2766 p->swap_map = NULL;
2767 zeromap = p->zeromap;
2768 p->zeromap = NULL;
2769 cluster_info = p->cluster_info;
2770 p->cluster_info = NULL;
2771 spin_unlock(&p->lock);
2772 spin_unlock(&swap_lock);
2773 arch_swap_invalidate_area(p->type);
2774 zswap_swapoff(p->type);
2775 mutex_unlock(&swapon_mutex);
2776 free_percpu(p->percpu_cluster);
2777 p->percpu_cluster = NULL;
2778 kfree(p->global_cluster);
2779 p->global_cluster = NULL;
2780 vfree(swap_map);
2781 kvfree(zeromap);
2782 kvfree(cluster_info);
2783 /* Destroy swap account information */
2784 swap_cgroup_swapoff(p->type);
2785 exit_swap_address_space(p->type);
2787 inode = mapping->host;
2789 inode_lock(inode);
2790 inode->i_flags &= ~S_SWAPFILE;
2791 inode_unlock(inode);
2792 filp_close(swap_file, NULL);
2795 * Clear the SWP_USED flag after all resources are freed so that swapon
2796 * can reuse this swap_info in alloc_swap_info() safely. It is ok to
2797 * not hold p->lock after we cleared its SWP_WRITEOK.
2799 spin_lock(&swap_lock);
2800 p->flags = 0;
2801 spin_unlock(&swap_lock);
2803 err = 0;
2804 atomic_inc(&proc_poll_event);
2805 wake_up_interruptible(&proc_poll_wait);
2807 out_dput:
2808 filp_close(victim, NULL);
2809 out:
2810 putname(pathname);
2811 return err;
2814 #ifdef CONFIG_PROC_FS
2815 static __poll_t swaps_poll(struct file *file, poll_table *wait)
2817 struct seq_file *seq = file->private_data;
2819 poll_wait(file, &proc_poll_wait, wait);
2821 if (seq->poll_event != atomic_read(&proc_poll_event)) {
2822 seq->poll_event = atomic_read(&proc_poll_event);
2823 return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2826 return EPOLLIN | EPOLLRDNORM;
2829 /* iterator */
2830 static void *swap_start(struct seq_file *swap, loff_t *pos)
2832 struct swap_info_struct *si;
2833 int type;
2834 loff_t l = *pos;
2836 mutex_lock(&swapon_mutex);
2838 if (!l)
2839 return SEQ_START_TOKEN;
2841 for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2842 if (!(si->flags & SWP_USED) || !si->swap_map)
2843 continue;
2844 if (!--l)
2845 return si;
2848 return NULL;
2851 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2853 struct swap_info_struct *si = v;
2854 int type;
2856 if (v == SEQ_START_TOKEN)
2857 type = 0;
2858 else
2859 type = si->type + 1;
2861 ++(*pos);
2862 for (; (si = swap_type_to_swap_info(type)); type++) {
2863 if (!(si->flags & SWP_USED) || !si->swap_map)
2864 continue;
2865 return si;
2868 return NULL;
2871 static void swap_stop(struct seq_file *swap, void *v)
2873 mutex_unlock(&swapon_mutex);
2876 static int swap_show(struct seq_file *swap, void *v)
2878 struct swap_info_struct *si = v;
2879 struct file *file;
2880 int len;
2881 unsigned long bytes, inuse;
2883 if (si == SEQ_START_TOKEN) {
2884 seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2885 return 0;
2888 bytes = K(si->pages);
2889 inuse = K(swap_usage_in_pages(si));
2891 file = si->swap_file;
2892 len = seq_file_path(swap, file, " \t\n\\");
2893 seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n",
2894 len < 40 ? 40 - len : 1, " ",
2895 S_ISBLK(file_inode(file)->i_mode) ?
2896 "partition" : "file\t",
2897 bytes, bytes < 10000000 ? "\t" : "",
2898 inuse, inuse < 10000000 ? "\t" : "",
2899 si->prio);
2900 return 0;
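/*
 * Editor's note (illustrative, values made up): with the format string
 * above, /proc/swaps ends up looking roughly like
 *
 *	Filename        Type        Size        Used        Priority
 *	/dev/sda2       partition   8388604     1024        -2
 *	/swapfile       file        2097148     0           -3
 *
 * Sizes are in units of 1024 bytes (K() converts pages to kilobytes), and
 * the conditional extra "\t" keeps the columns aligned for values below
 * 10000000.
 */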
2903 static const struct seq_operations swaps_op = {
2904 .start = swap_start,
2905 .next = swap_next,
2906 .stop = swap_stop,
2907 .show = swap_show
2910 static int swaps_open(struct inode *inode, struct file *file)
2912 struct seq_file *seq;
2913 int ret;
2915 ret = seq_open(file, &swaps_op);
2916 if (ret)
2917 return ret;
2919 seq = file->private_data;
2920 seq->poll_event = atomic_read(&proc_poll_event);
2921 return 0;
2924 static const struct proc_ops swaps_proc_ops = {
2925 .proc_flags = PROC_ENTRY_PERMANENT,
2926 .proc_open = swaps_open,
2927 .proc_read = seq_read,
2928 .proc_lseek = seq_lseek,
2929 .proc_release = seq_release,
2930 .proc_poll = swaps_poll,
2933 static int __init procswaps_init(void)
2935 proc_create("swaps", 0, NULL, &swaps_proc_ops);
2936 return 0;
2938 __initcall(procswaps_init);
2939 #endif /* CONFIG_PROC_FS */
2941 #ifdef MAX_SWAPFILES_CHECK
2942 static int __init max_swapfiles_check(void)
2944 MAX_SWAPFILES_CHECK();
2945 return 0;
2947 late_initcall(max_swapfiles_check);
2948 #endif
2950 static struct swap_info_struct *alloc_swap_info(void)
2952 struct swap_info_struct *p;
2953 struct swap_info_struct *defer = NULL;
2954 unsigned int type;
2955 int i;
2957 p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2958 if (!p)
2959 return ERR_PTR(-ENOMEM);
2961 if (percpu_ref_init(&p->users, swap_users_ref_free,
2962 PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
2963 kvfree(p);
2964 return ERR_PTR(-ENOMEM);
2967 spin_lock(&swap_lock);
2968 for (type = 0; type < nr_swapfiles; type++) {
2969 if (!(swap_info[type]->flags & SWP_USED))
2970 break;
2972 if (type >= MAX_SWAPFILES) {
2973 spin_unlock(&swap_lock);
2974 percpu_ref_exit(&p->users);
2975 kvfree(p);
2976 return ERR_PTR(-EPERM);
2978 if (type >= nr_swapfiles) {
2979 p->type = type;
2981 * Publish the swap_info_struct after initializing it.
2982 * Note that kvzalloc() above zeroes all its fields.
2984 smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
2985 nr_swapfiles++;
2986 } else {
2987 defer = p;
2988 p = swap_info[type];
2990 * Do not memset this entry: a racing procfs swap_next()
2991 * would be relying on p->type to remain valid.
2994 p->swap_extent_root = RB_ROOT;
2995 plist_node_init(&p->list, 0);
2996 for_each_node(i)
2997 plist_node_init(&p->avail_lists[i], 0);
2998 p->flags = SWP_USED;
2999 spin_unlock(&swap_lock);
3000 if (defer) {
3001 percpu_ref_exit(&defer->users);
3002 kvfree(defer);
3004 spin_lock_init(&p->lock);
3005 spin_lock_init(&p->cont_lock);
3006 atomic_long_set(&p->inuse_pages, SWAP_USAGE_OFFLIST_BIT);
3007 init_completion(&p->comp);
3009 return p;
3012 static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
3014 if (S_ISBLK(inode->i_mode)) {
3015 si->bdev = I_BDEV(inode);
3017 * Zoned block devices contain zones that have a sequential
3018 * write only restriction. Hence zoned block devices are not
3019 * suitable for swapping. Disallow them here.
3021 if (bdev_is_zoned(si->bdev))
3022 return -EINVAL;
3023 si->flags |= SWP_BLKDEV;
3024 } else if (S_ISREG(inode->i_mode)) {
3025 si->bdev = inode->i_sb->s_bdev;
3028 return 0;
3033 * Find out how many pages are allowed for a single swap device. There
3034 * are two limiting factors:
3035 * 1) the number of bits for the swap offset in the swp_entry_t type, and
3036 * 2) the number of bits in the swap pte, as defined by the different
3037 * architectures.
3039 * In order to find the largest possible bit mask, a swap entry with
3040 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
3041 * decoded to a swp_entry_t again, and finally the swap offset is
3042 * extracted.
3044 * This will mask all the bits from the initial ~0UL mask that can't
3045 * be encoded in either the swp_entry_t or the architecture definition
3046 * of a swap pte.
3048 unsigned long generic_max_swapfile_size(void)
3050 return swp_offset(pte_to_swp_entry(
3051 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
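/*
 * Worked example (editor's note, not part of the upstream code): if an
 * architecture's swap pte can only encode, say, 50 bits of swap offset,
 * then round-tripping swp_entry(0, ~0UL) through swp_entry_to_pte() and
 * pte_to_swp_entry() masks the offset down to 2^50 - 1, and the function
 * returns 2^50 pages.  Whichever is narrower, the swp_entry_t offset field
 * or the architecture's swap pte layout, ends up bounding the result.
 */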
3054 /* Can be overridden by an architecture for additional checks. */
3055 __weak unsigned long arch_max_swapfile_size(void)
3057 return generic_max_swapfile_size();
3060 static unsigned long read_swap_header(struct swap_info_struct *si,
3061 union swap_header *swap_header,
3062 struct inode *inode)
3064 int i;
3065 unsigned long maxpages;
3066 unsigned long swapfilepages;
3067 unsigned long last_page;
3069 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
3070 pr_err("Unable to find swap-space signature\n");
3071 return 0;
3074 /* swap partition endianness hack... */
3075 if (swab32(swap_header->info.version) == 1) {
3076 swab32s(&swap_header->info.version);
3077 swab32s(&swap_header->info.last_page);
3078 swab32s(&swap_header->info.nr_badpages);
3079 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3080 return 0;
3081 for (i = 0; i < swap_header->info.nr_badpages; i++)
3082 swab32s(&swap_header->info.badpages[i]);
3084 /* Check the swap header's sub-version */
3085 if (swap_header->info.version != 1) {
3086 pr_warn("Unable to handle swap header version %d\n",
3087 swap_header->info.version);
3088 return 0;
3091 maxpages = swapfile_maximum_size;
3092 last_page = swap_header->info.last_page;
3093 if (!last_page) {
3094 pr_warn("Empty swap-file\n");
3095 return 0;
3097 if (last_page > maxpages) {
3098 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
3099 K(maxpages), K(last_page));
3101 if (maxpages > last_page) {
3102 maxpages = last_page + 1;
3103 /* p->max is an unsigned int: don't overflow it */
3104 if ((unsigned int)maxpages == 0)
3105 maxpages = UINT_MAX;
3108 if (!maxpages)
3109 return 0;
3110 swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
3111 if (swapfilepages && maxpages > swapfilepages) {
3112 pr_warn("Swap area shorter than signature indicates\n");
3113 return 0;
3115 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
3116 return 0;
3117 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3118 return 0;
3120 return maxpages;
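/*
 * Editor's note (illustrative, not part of the upstream code): the
 * endianness hack above catches a swap area that was mkswap'ed on a machine
 * of the opposite byte order.  A version of 1 written on a big-endian host
 * reads as 0x01000000 on a little-endian host; swab32() of that is 1, so
 * the branch is taken and version, last_page, nr_badpages and the
 * badpages[] array are byte-swapped in place before the normal validation
 * runs.
 */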
3123 #define SWAP_CLUSTER_INFO_COLS \
3124 DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3125 #define SWAP_CLUSTER_SPACE_COLS \
3126 DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3127 #define SWAP_CLUSTER_COLS \
3128 max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3130 static int setup_swap_map_and_extents(struct swap_info_struct *si,
3131 union swap_header *swap_header,
3132 unsigned char *swap_map,
3133 unsigned long maxpages,
3134 sector_t *span)
3136 unsigned int nr_good_pages;
3137 unsigned long i;
3138 int nr_extents;
3140 nr_good_pages = maxpages - 1; /* omit header page */
3142 for (i = 0; i < swap_header->info.nr_badpages; i++) {
3143 unsigned int page_nr = swap_header->info.badpages[i];
3144 if (page_nr == 0 || page_nr > swap_header->info.last_page)
3145 return -EINVAL;
3146 if (page_nr < maxpages) {
3147 swap_map[page_nr] = SWAP_MAP_BAD;
3148 nr_good_pages--;
3152 if (nr_good_pages) {
3153 swap_map[0] = SWAP_MAP_BAD;
3154 si->max = maxpages;
3155 si->pages = nr_good_pages;
3156 nr_extents = setup_swap_extents(si, span);
3157 if (nr_extents < 0)
3158 return nr_extents;
3159 nr_good_pages = si->pages;
3161 if (!nr_good_pages) {
3162 pr_warn("Empty swap-file\n");
3163 return -EINVAL;
3166 return nr_extents;
3169 static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
3170 union swap_header *swap_header,
3171 unsigned long maxpages)
3173 unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3174 struct swap_cluster_info *cluster_info;
3175 unsigned long i, j, k, idx;
3176 int cpu, err = -ENOMEM;
3178 cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
3179 if (!cluster_info)
3180 goto err;
3182 for (i = 0; i < nr_clusters; i++)
3183 spin_lock_init(&cluster_info[i].lock);
3185 if (si->flags & SWP_SOLIDSTATE) {
3186 si->percpu_cluster = alloc_percpu(struct percpu_cluster);
3187 if (!si->percpu_cluster)
3188 goto err_free;
3190 for_each_possible_cpu(cpu) {
3191 struct percpu_cluster *cluster;
3193 cluster = per_cpu_ptr(si->percpu_cluster, cpu);
3194 for (i = 0; i < SWAP_NR_ORDERS; i++)
3195 cluster->next[i] = SWAP_ENTRY_INVALID;
3196 local_lock_init(&cluster->lock);
3198 } else {
3199 si->global_cluster = kmalloc(sizeof(*si->global_cluster),
3200 GFP_KERNEL);
3201 if (!si->global_cluster)
3202 goto err_free;
3203 for (i = 0; i < SWAP_NR_ORDERS; i++)
3204 si->global_cluster->next[i] = SWAP_ENTRY_INVALID;
3205 spin_lock_init(&si->global_cluster_lock);
3209 * Mark unusable pages as unavailable. The clusters aren't
3210 * marked free yet, so no list operations are involved yet.
3212 * See setup_swap_map_and_extents(): header page, bad pages,
3213 * and the EOF part of the last cluster.
3215 inc_cluster_info_page(si, cluster_info, 0);
3216 for (i = 0; i < swap_header->info.nr_badpages; i++)
3217 inc_cluster_info_page(si, cluster_info,
3218 swap_header->info.badpages[i]);
3219 for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3220 inc_cluster_info_page(si, cluster_info, i);
3222 INIT_LIST_HEAD(&si->free_clusters);
3223 INIT_LIST_HEAD(&si->full_clusters);
3224 INIT_LIST_HEAD(&si->discard_clusters);
3226 for (i = 0; i < SWAP_NR_ORDERS; i++) {
3227 INIT_LIST_HEAD(&si->nonfull_clusters[i]);
3228 INIT_LIST_HEAD(&si->frag_clusters[i]);
3229 atomic_long_set(&si->frag_cluster_nr[i], 0);
3233 * Interleave the lists to reduce false cache line sharing between cluster_info
3234 * entries and between clusters sharing the same address space; see the worked example after this function.
3236 for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3237 j = k % SWAP_CLUSTER_COLS;
3238 for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3239 struct swap_cluster_info *ci;
3240 idx = i * SWAP_CLUSTER_COLS + j;
3241 ci = cluster_info + idx;
3242 if (idx >= nr_clusters)
3243 continue;
3244 if (ci->count) {
3245 ci->flags = CLUSTER_FLAG_NONFULL;
3246 list_add_tail(&ci->list, &si->nonfull_clusters[0]);
3247 continue;
3249 ci->flags = CLUSTER_FLAG_FREE;
3250 list_add_tail(&ci->list, &si->free_clusters);
3254 return cluster_info;
3256 err_free:
3257 kvfree(cluster_info);
3258 err:
3259 return ERR_PTR(err);
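/*
 * Worked example (editor's note, not part of the upstream code): with
 * SWAP_CLUSTER_COLS == 4 and 10 clusters, the final loop in setup_clusters()
 * walks the cluster_info array column by column and links the free clusters
 * in the order 0, 4, 8, 1, 5, 9, 2, 6, 3, 7.  Consecutive allocations
 * therefore take clusters whose swap_cluster_info entries sit in different
 * cache lines (and whose pages belong to different swap address spaces)
 * instead of neighbouring entries.
 */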
3262 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3264 struct swap_info_struct *si;
3265 struct filename *name;
3266 struct file *swap_file = NULL;
3267 struct address_space *mapping;
3268 struct dentry *dentry;
3269 int prio;
3270 int error;
3271 union swap_header *swap_header;
3272 int nr_extents;
3273 sector_t span;
3274 unsigned long maxpages;
3275 unsigned char *swap_map = NULL;
3276 unsigned long *zeromap = NULL;
3277 struct swap_cluster_info *cluster_info = NULL;
3278 struct folio *folio = NULL;
3279 struct inode *inode = NULL;
3280 bool inced_nr_rotate_swap = false;
3282 if (swap_flags & ~SWAP_FLAGS_VALID)
3283 return -EINVAL;
3285 if (!capable(CAP_SYS_ADMIN))
3286 return -EPERM;
3288 if (!swap_avail_heads)
3289 return -ENOMEM;
3291 si = alloc_swap_info();
3292 if (IS_ERR(si))
3293 return PTR_ERR(si);
3295 INIT_WORK(&si->discard_work, swap_discard_work);
3296 INIT_WORK(&si->reclaim_work, swap_reclaim_work);
3298 name = getname(specialfile);
3299 if (IS_ERR(name)) {
3300 error = PTR_ERR(name);
3301 name = NULL;
3302 goto bad_swap;
3304 swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
3305 if (IS_ERR(swap_file)) {
3306 error = PTR_ERR(swap_file);
3307 swap_file = NULL;
3308 goto bad_swap;
3311 si->swap_file = swap_file;
3312 mapping = swap_file->f_mapping;
3313 dentry = swap_file->f_path.dentry;
3314 inode = mapping->host;
3316 error = claim_swapfile(si, inode);
3317 if (unlikely(error))
3318 goto bad_swap;
3320 inode_lock(inode);
3321 if (d_unlinked(dentry) || cant_mount(dentry)) {
3322 error = -ENOENT;
3323 goto bad_swap_unlock_inode;
3325 if (IS_SWAPFILE(inode)) {
3326 error = -EBUSY;
3327 goto bad_swap_unlock_inode;
3331 * Read the swap header.
3333 if (!mapping->a_ops->read_folio) {
3334 error = -EINVAL;
3335 goto bad_swap_unlock_inode;
3337 folio = read_mapping_folio(mapping, 0, swap_file);
3338 if (IS_ERR(folio)) {
3339 error = PTR_ERR(folio);
3340 goto bad_swap_unlock_inode;
3342 swap_header = kmap_local_folio(folio, 0);
3344 maxpages = read_swap_header(si, swap_header, inode);
3345 if (unlikely(!maxpages)) {
3346 error = -EINVAL;
3347 goto bad_swap_unlock_inode;
3350 /* OK, set up the swap map and apply the bad block list */
3351 swap_map = vzalloc(maxpages);
3352 if (!swap_map) {
3353 error = -ENOMEM;
3354 goto bad_swap_unlock_inode;
3357 error = swap_cgroup_swapon(si->type, maxpages);
3358 if (error)
3359 goto bad_swap_unlock_inode;
3361 nr_extents = setup_swap_map_and_extents(si, swap_header, swap_map,
3362 maxpages, &span);
3363 if (unlikely(nr_extents < 0)) {
3364 error = nr_extents;
3365 goto bad_swap_unlock_inode;
3369 * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might
3370 * be above MAX_PAGE_ORDER in case of a large swap file.
3372 zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long),
3373 GFP_KERNEL | __GFP_ZERO);
3374 if (!zeromap) {
3375 error = -ENOMEM;
3376 goto bad_swap_unlock_inode;
3379 if (si->bdev && bdev_stable_writes(si->bdev))
3380 si->flags |= SWP_STABLE_WRITES;
3382 if (si->bdev && bdev_synchronous(si->bdev))
3383 si->flags |= SWP_SYNCHRONOUS_IO;
3385 if (si->bdev && bdev_nonrot(si->bdev)) {
3386 si->flags |= SWP_SOLIDSTATE;
3387 } else {
3388 atomic_inc(&nr_rotate_swap);
3389 inced_nr_rotate_swap = true;
3392 cluster_info = setup_clusters(si, swap_header, maxpages);
3393 if (IS_ERR(cluster_info)) {
3394 error = PTR_ERR(cluster_info);
3395 cluster_info = NULL;
3396 goto bad_swap_unlock_inode;
3399 if ((swap_flags & SWAP_FLAG_DISCARD) &&
3400 si->bdev && bdev_max_discard_sectors(si->bdev)) {
3402 * When discard is enabled for swap with no particular
3403 * policy flagged, we set all swap discard flags here in
3404 * order to sustain backward compatibility with older
3405 * swapon(8) releases.
3407 si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3408 SWP_PAGE_DISCARD);
3411 * By flagging sys_swapon, a sysadmin can tell us to
3412 * either do single-time area discards only, or to just
3413 * perform discards for released swap page-clusters.
3414 * Now it's time to adjust the p->flags accordingly.
3416 if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3417 si->flags &= ~SWP_PAGE_DISCARD;
3418 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3419 si->flags &= ~SWP_AREA_DISCARD;
3421 /* issue a swapon-time discard if it's still required */
3422 if (si->flags & SWP_AREA_DISCARD) {
3423 int err = discard_swap(si);
3424 if (unlikely(err))
3425 pr_err("swapon: discard_swap(%p): %d\n",
3426 si, err);
3430 error = init_swap_address_space(si->type, maxpages);
3431 if (error)
3432 goto bad_swap_unlock_inode;
3434 error = zswap_swapon(si->type, maxpages);
3435 if (error)
3436 goto free_swap_address_space;
3439 * Flush any pending IO and dirty mappings before we start using this
3440 * swap device.
3442 inode->i_flags |= S_SWAPFILE;
3443 error = inode_drain_writes(inode);
3444 if (error) {
3445 inode->i_flags &= ~S_SWAPFILE;
3446 goto free_swap_zswap;
3449 mutex_lock(&swapon_mutex);
3450 prio = -1;
3451 if (swap_flags & SWAP_FLAG_PREFER)
3452 prio =
3453 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3454 enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
3456 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n",
3457 K(si->pages), name->name, si->prio, nr_extents,
3458 K((unsigned long long)span),
3459 (si->flags & SWP_SOLIDSTATE) ? "SS" : "",
3460 (si->flags & SWP_DISCARDABLE) ? "D" : "",
3461 (si->flags & SWP_AREA_DISCARD) ? "s" : "",
3462 (si->flags & SWP_PAGE_DISCARD) ? "c" : "");
3464 mutex_unlock(&swapon_mutex);
3465 atomic_inc(&proc_poll_event);
3466 wake_up_interruptible(&proc_poll_wait);
3468 error = 0;
3469 goto out;
3470 free_swap_zswap:
3471 zswap_swapoff(si->type);
3472 free_swap_address_space:
3473 exit_swap_address_space(si->type);
3474 bad_swap_unlock_inode:
3475 inode_unlock(inode);
3476 bad_swap:
3477 free_percpu(si->percpu_cluster);
3478 si->percpu_cluster = NULL;
3479 kfree(si->global_cluster);
3480 si->global_cluster = NULL;
3481 inode = NULL;
3482 destroy_swap_extents(si);
3483 swap_cgroup_swapoff(si->type);
3484 spin_lock(&swap_lock);
3485 si->swap_file = NULL;
3486 si->flags = 0;
3487 spin_unlock(&swap_lock);
3488 vfree(swap_map);
3489 kvfree(zeromap);
3490 kvfree(cluster_info);
3491 if (inced_nr_rotate_swap)
3492 atomic_dec(&nr_rotate_swap);
3493 if (swap_file)
3494 filp_close(swap_file, NULL);
3495 out:
3496 if (!IS_ERR_OR_NULL(folio))
3497 folio_release_kmap(folio, swap_header);
3498 if (name)
3499 putname(name);
3500 if (inode)
3501 inode_unlock(inode);
3502 if (!error)
3503 enable_swap_slots_cache();
3504 return error;
3507 void si_swapinfo(struct sysinfo *val)
3509 unsigned int type;
3510 unsigned long nr_to_be_unused = 0;
3512 spin_lock(&swap_lock);
3513 for (type = 0; type < nr_swapfiles; type++) {
3514 struct swap_info_struct *si = swap_info[type];
3516 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3517 nr_to_be_unused += swap_usage_in_pages(si);
3519 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3520 val->totalswap = total_swap_pages + nr_to_be_unused;
3521 spin_unlock(&swap_lock);
3525 * Verify that nr swap entries are valid and increment their swap map counts.
3527 * Returns an error code in the following cases.
3528 * - success -> 0
3529 * - swp_entry is invalid -> EINVAL
3530 * - swp_entry is migration entry -> EINVAL
3531 * - swap-cache reference is requested but there is already one. -> EEXIST
3532 * - swap-cache reference is requested but the entry is not used. -> ENOENT
3533 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3535 static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
3537 struct swap_info_struct *si;
3538 struct swap_cluster_info *ci;
3539 unsigned long offset;
3540 unsigned char count;
3541 unsigned char has_cache;
3542 int err, i;
3544 si = swp_swap_info(entry);
3546 offset = swp_offset(entry);
3547 VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
3548 VM_WARN_ON(usage == 1 && nr > 1);
3549 ci = lock_cluster(si, offset);
3551 err = 0;
3552 for (i = 0; i < nr; i++) {
3553 count = si->swap_map[offset + i];
3556 * swapin_readahead() doesn't check if a swap entry is valid, so the
3557 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3559 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3560 err = -ENOENT;
3561 goto unlock_out;
3564 has_cache = count & SWAP_HAS_CACHE;
3565 count &= ~SWAP_HAS_CACHE;
3567 if (!count && !has_cache) {
3568 err = -ENOENT;
3569 } else if (usage == SWAP_HAS_CACHE) {
3570 if (has_cache)
3571 err = -EEXIST;
3572 } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) {
3573 err = -EINVAL;
3576 if (err)
3577 goto unlock_out;
3580 for (i = 0; i < nr; i++) {
3581 count = si->swap_map[offset + i];
3582 has_cache = count & SWAP_HAS_CACHE;
3583 count &= ~SWAP_HAS_CACHE;
3585 if (usage == SWAP_HAS_CACHE)
3586 has_cache = SWAP_HAS_CACHE;
3587 else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3588 count += usage;
3589 else if (swap_count_continued(si, offset + i, count))
3590 count = COUNT_CONTINUED;
3591 else {
3593 * Don't need to rollback changes, because if
3594 * usage == 1, there must be nr == 1.
3596 err = -ENOMEM;
3597 goto unlock_out;
3600 WRITE_ONCE(si->swap_map[offset + i], count | has_cache);
3603 unlock_out:
3604 unlock_cluster(ci);
3605 return err;
3609 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3610 * (in which case its reference count is never incremented).
3612 void swap_shmem_alloc(swp_entry_t entry, int nr)
3614 __swap_duplicate(entry, SWAP_MAP_SHMEM, nr);
3618 * Increase reference count of swap entry by 1.
3619 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3620 * but could not be atomically allocated. Returns 0, just as if it succeeded,
3621 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3622 * might occur if a page table entry has got corrupted.
3624 int swap_duplicate(swp_entry_t entry)
3626 int err = 0;
3628 while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM)
3629 err = add_swap_count_continuation(entry, GFP_ATOMIC);
3630 return err;
3634 * @entry: first swap entry from which we allocate nr swap cache entries.
3636 * Called when allocating swap cache for existing swap entries.
3637 * This can return error codes. Returns 0 on success.
3638 * -EEXIST means there is already a swap cache entry.
3639 * Note: return code is different from swap_duplicate().
3641 int swapcache_prepare(swp_entry_t entry, int nr)
3643 return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
3646 void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
3648 unsigned long offset = swp_offset(entry);
3650 cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE);
3653 struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3655 return swap_type_to_swap_info(swp_type(entry));
3659 * out-of-line methods to avoid include hell.
3661 struct address_space *swapcache_mapping(struct folio *folio)
3663 return swp_swap_info(folio->swap)->swap_file->f_mapping;
3665 EXPORT_SYMBOL_GPL(swapcache_mapping);
3667 pgoff_t __folio_swap_cache_index(struct folio *folio)
3669 return swap_cache_index(folio->swap);
3671 EXPORT_SYMBOL_GPL(__folio_swap_cache_index);
3674 * add_swap_count_continuation - called when a swap count is duplicated
3675 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3676 * page of the original vmalloc'ed swap_map, to hold the continuation count
3677 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
3678 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3680 * These continuation pages are seldom referenced: the common paths all work
3681 * on the original swap_map, only referring to a continuation page when the
3682 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3684 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3685 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3686 * can be called after dropping locks.
3688 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3690 struct swap_info_struct *si;
3691 struct swap_cluster_info *ci;
3692 struct page *head;
3693 struct page *page;
3694 struct page *list_page;
3695 pgoff_t offset;
3696 unsigned char count;
3697 int ret = 0;
3700 * When debugging, it's easier to use __GFP_ZERO here; but it's better
3701 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3703 page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3705 si = get_swap_device(entry);
3706 if (!si) {
3708 * An acceptable race has occurred since the failing
3709 * __swap_duplicate(): the swap device may have been swapped off.
3711 goto outer;
3714 offset = swp_offset(entry);
3716 ci = lock_cluster(si, offset);
3718 count = swap_count(si->swap_map[offset]);
3720 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3722 * The higher the swap count, the more likely it is that tasks
3723 * will race to add swap count continuation: we need to avoid
3724 * over-provisioning.
3726 goto out;
3729 if (!page) {
3730 ret = -ENOMEM;
3731 goto out;
3734 head = vmalloc_to_page(si->swap_map + offset);
3735 offset &= ~PAGE_MASK;
3737 spin_lock(&si->cont_lock);
3739 * Page allocation does not initialize the page's lru field,
3740 * but it does always reset its private field.
3742 if (!page_private(head)) {
3743 BUG_ON(count & COUNT_CONTINUED);
3744 INIT_LIST_HEAD(&head->lru);
3745 set_page_private(head, SWP_CONTINUED);
3746 si->flags |= SWP_CONTINUED;
3749 list_for_each_entry(list_page, &head->lru, lru) {
3750 unsigned char *map;
3753 * If the previous map said no continuation, but we've found
3754 * a continuation page, free our allocation and use this one.
3756 if (!(count & COUNT_CONTINUED))
3757 goto out_unlock_cont;
3759 map = kmap_local_page(list_page) + offset;
3760 count = *map;
3761 kunmap_local(map);
3764 * If this continuation count now has some space in it,
3765 * free our allocation and use this one.
3767 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3768 goto out_unlock_cont;
3771 list_add_tail(&page->lru, &head->lru);
3772 page = NULL; /* now it's attached, don't free it */
3773 out_unlock_cont:
3774 spin_unlock(&si->cont_lock);
3775 out:
3776 unlock_cluster(ci);
3777 put_swap_device(si);
3778 outer:
3779 if (page)
3780 __free_page(page);
3781 return ret;
3785 * swap_count_continued - when the original swap_map count is incremented
3786 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3787 * into, carry if so, or else fail until a new continuation page is allocated;
3788 * when the original swap_map count is decremented from 0 with continuation,
3789 * borrow from the continuation and report whether it still holds more.
3790 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3791 * lock.
3793 static bool swap_count_continued(struct swap_info_struct *si,
3794 pgoff_t offset, unsigned char count)
3796 struct page *head;
3797 struct page *page;
3798 unsigned char *map;
3799 bool ret;
3801 head = vmalloc_to_page(si->swap_map + offset);
3802 if (page_private(head) != SWP_CONTINUED) {
3803 BUG_ON(count & COUNT_CONTINUED);
3804 return false; /* need to add count continuation */
3807 spin_lock(&si->cont_lock);
3808 offset &= ~PAGE_MASK;
3809 page = list_next_entry(head, lru);
3810 map = kmap_local_page(page) + offset;
3812 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
3813 goto init_map; /* jump over SWAP_CONT_MAX checks */
3815 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3817 * Think of how you add 1 to 999
3819 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3820 kunmap_local(map);
3821 page = list_next_entry(page, lru);
3822 BUG_ON(page == head);
3823 map = kmap_local_page(page) + offset;
3825 if (*map == SWAP_CONT_MAX) {
3826 kunmap_local(map);
3827 page = list_next_entry(page, lru);
3828 if (page == head) {
3829 ret = false; /* add count continuation */
3830 goto out;
3832 map = kmap_local_page(page) + offset;
3833 init_map: *map = 0; /* we didn't zero the page */
3835 *map += 1;
3836 kunmap_local(map);
3837 while ((page = list_prev_entry(page, lru)) != head) {
3838 map = kmap_local_page(page) + offset;
3839 *map = COUNT_CONTINUED;
3840 kunmap_local(map);
3842 ret = true; /* incremented */
3844 } else { /* decrementing */
3846 * Think of how you subtract 1 from 1000
3848 BUG_ON(count != COUNT_CONTINUED);
3849 while (*map == COUNT_CONTINUED) {
3850 kunmap_local(map);
3851 page = list_next_entry(page, lru);
3852 BUG_ON(page == head);
3853 map = kmap_local_page(page) + offset;
3855 BUG_ON(*map == 0);
3856 *map -= 1;
3857 if (*map == 0)
3858 count = 0;
3859 kunmap_local(map);
3860 while ((page = list_prev_entry(page, lru)) != head) {
3861 map = kmap_local_page(page) + offset;
3862 *map = SWAP_CONT_MAX | count;
3863 count = COUNT_CONTINUED;
3864 kunmap_local(map);
3866 ret = count == COUNT_CONTINUED;
3868 out:
3869 spin_unlock(&si->cont_lock);
3870 return ret;
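/*
 * Worked example (editor's note, not part of the upstream code): the
 * swap_map byte holds the least significant "digit" of the reference count
 * and each continuation page holds one more digit for the same offset.
 * Using decimal digits as an analogy, incrementing a count that looks like
 * 999 carries through every digit already at its maximum until one with
 * room is found (the first while loop above), and decrementing a count that
 * looks like 1000 borrows back down through the zero digits (the second
 * while loop).  COUNT_CONTINUED in a digit records only that a higher-order
 * digit exists on the next continuation page.
 */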
3874 * free_swap_count_continuations - called from swapoff to free all the continuation pages
3875 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
3877 static void free_swap_count_continuations(struct swap_info_struct *si)
3879 pgoff_t offset;
3881 for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3882 struct page *head;
3883 head = vmalloc_to_page(si->swap_map + offset);
3884 if (page_private(head)) {
3885 struct page *page, *next;
3887 list_for_each_entry_safe(page, next, &head->lru, lru) {
3888 list_del(&page->lru);
3889 __free_page(page);
3895 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3896 void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
3898 struct swap_info_struct *si, *next;
3899 int nid = folio_nid(folio);
3901 if (!(gfp & __GFP_IO))
3902 return;
3904 if (!__has_usable_swap())
3905 return;
3907 if (!blk_cgroup_congested())
3908 return;
3911 * We've already scheduled a throttle, avoid taking the global swap
3912 * lock.
3914 if (current->throttle_disk)
3915 return;
3917 spin_lock(&swap_avail_lock);
3918 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3919 avail_lists[nid]) {
3920 if (si->bdev) {
3921 blkcg_schedule_throttle(si->bdev->bd_disk, true);
3922 break;
3925 spin_unlock(&swap_avail_lock);
3927 #endif
3929 static int __init swapfile_init(void)
3931 int nid;
3933 swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3934 GFP_KERNEL);
3935 if (!swap_avail_heads) {
3936 pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3937 return -ENOMEM;
3940 for_each_node(nid)
3941 plist_head_init(&swap_avail_heads[nid]);
3943 swapfile_maximum_size = arch_max_swapfile_size();
3945 #ifdef CONFIG_MIGRATION
3946 if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
3947 swap_migration_ad_supported = true;
3948 #endif /* CONFIG_MIGRATION */
3950 return 0;
3952 subsys_initcall(swapfile_init);