/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H
#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>
struct notifier_block;
#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
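
/*
 * Usage example (illustrative): a swapon(2) caller requesting priority 5
 * with per-cluster discard would pass
 *
 *	swapon(path, SWAP_FLAG_PREFER | (5 << SWAP_FLAG_PRIO_SHIFT) |
 *		     SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_PAGES);
 *
 * Any bit outside SWAP_FLAGS_VALID is rejected by sys_swapon() with -EINVAL.
 */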
static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}
/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
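
/*
 * Worked example (illustrative; the real encoding is per-architecture, in
 * __swp_type()/__swp_offset()): with a 32-bit pgoff_t and a 5/27 split,
 *
 *	type   = entry >> 27;			// 5 bits, up to 32 types
 *	offset = entry & ((1UL << 27) - 1);	// 27 bits of page offset
 *
 * so one swap type can span 2^27 pages, i.e. 512GB with 4K pages.
 */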
/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on those pages.
 */
/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.rst. The short description is that we need struct
 * pages for device memory that is unaddressable (inaccessible) by the CPU,
 * so that we can migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table
 * entry to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif
/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif
/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif
#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
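
/*
 * Worked out (illustrative): with CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION
 * and CONFIG_MEMORY_FAILURE all enabled, this is 32 - 2 - 2 - 1 = 27 usable
 * swap types; the five stolen type values sit just above MAX_SWAPFILES.
 */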
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * page boundary...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};
#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;
/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
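
/*
 * Worked out (illustrative, 4K pages): magic.magic sits at PAGE_SIZE - 10 =
 * 4086 and info.badpages at 1024 + 12 + 32 + 468 = 1536, so up to
 * (4086 - 1536) / 4 = 637 bad pages fit in the header.
 */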
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS		= (1 << 8),	/* swap file goes through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
	SWP_VALID	= (1 << 13),	/* swap is valid to be operated on? */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
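
/*
 * Example (illustrative) first-level swap_map byte values:
 *	0x01	one pte references the slot
 *	0x41	SWAP_HAS_CACHE | 1: one pte reference, page in swap cache
 *	0xbe	COUNT_CONTINUED | SWAP_MAP_MAX: consult the continuation page
 *	0x3f	SWAP_MAP_BAD: the slot is unusable
 */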
/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and swap_info_struct->swap_map
				 * elements that correspond to the
				 * swap cluster.
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
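
/*
 * Example (illustrative): a free cluster on the free list has
 * CLUSTER_FLAG_FREE set and its data field holding the index of the next
 * free cluster; the end of the list is marked with CLUSTER_FLAG_NEXT_NULL
 * rather than a valid next index. An allocated cluster instead keeps its
 * count of in-use pages in data.
 */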
/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entry from
 * its own cluster and swapout sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};
struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};
/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags need
					 * hold this lock and swap_lock. If
					 * both locks need hold, hold swap_lock
					 * first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[0]; /*
					   * entries in swap_avail_heads, one
					   * entry per node.
					   * Must be last as the number of the
					   * array is nr_node_ids, which is not
					   * a fixed value so have to allocate
					   * dynamically.
					   * And it has to be an array so that
					   * plist_for_each_* can work.
					   */
};
#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif
struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};
/* linux/mm/workingset.c */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping))		\
		xas_set_update(xas, workingset_update_node);		\
} while (0)
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct pagevec *pvec);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP
#include <linux/blk_types.h> /* for bio_end_io_t */
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);
/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
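
/*
 * Worked example (illustrative, 4K pages): 2^14 pages per address space is
 * 2^14 * 4K = 64MB of swap, so an entry with swp_offset() == 0x12345 lands
 * in swapper_spaces[type][0x12345 >> 14], i.e. element 4 of that type's
 * array of address spaces.
 */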
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
				      struct vm_area_struct *vma,
				      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);

static inline void put_swap_device(struct swap_info_struct *si)
{
	rcu_read_unlock();
}
#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
	return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));
static inline void show_swap_cache_info(void)
{
}
#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
				gfp_t gfp_mask, struct vm_fault *vmf)
{
	return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page,
							swp_entry_t entry)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */
#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
					 gfp_t gfp_mask);
#else
static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
						int node, gfp_t gfp_mask)
{
}
#endif
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */