#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means the swapcache is limited to 2^27 pages on
 * 32-bit-pgoff_t architectures, and that assumes the architecture packs
 * the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
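
/*
 * Illustrative sketch (not from the original header): the 5/27 split above
 * means a 32-bit value has room for a 5-bit type and a 27-bit page offset.
 * swap_example_pack() is a hypothetical helper showing only the arithmetic;
 * the real encoding is done by swp_entry()/swp_type()/swp_offset() in
 * <linux/swapops.h> and by the per-architecture pte code.
 */
static inline unsigned int swap_example_pack(unsigned int type,
					     unsigned int offset)
{
	/* type in the top MAX_SWAPFILES_SHIFT bits, offset in the low 27 */
	return (type << (32 - MAX_SWAPFILES_SHIFT)) |
	       (offset & ((1U << (32 - MAX_SWAPFILES_SHIFT)) - 1));
}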

/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
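
/*
 * Worked example: with CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE both
 * enabled, the special entries take the top of the 5-bit type space:
 *
 *	MAX_SWAPFILES       = (1 << 5) - 2 - 1 = 29
 *	SWP_HWPOISON        = 29
 *	SWP_MIGRATION_READ  = 30
 *	SWP_MIGRATION_WRITE = 31
 *
 * so at most 29 real swap areas can be active at once in that configuration.
 */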

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
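
/*
 * Illustrative sketch (hypothetical helper, not a kernel API): because the
 * magic string occupies the last ten bytes of the first PAGE_SIZE block, a
 * swapon-style detector can read that first block and check it like this.
 */
static inline int swap_header_example_ok(const union swap_header *hdr)
{
	return !memcmp(hdr->magic.magic, "SWAPSPACE2", 10) ||
	       !memcmp(hdr->magic.magic, "SWAP-SPACE", 10);
}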

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	unsigned long nr_pages;
	sector_t start_block;
};
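
/*
 * Illustrative sketch (hypothetical helper): once the extent containing a
 * given swapfile page offset has been found, translating it to a disk block
 * is simple arithmetic.  The real lookup over the extent list is done by
 * map_swap_page() in mm/swapfile.c.
 */
static inline sector_t swap_extent_example_block(const struct swap_extent *se,
						 pgoff_t offset)
{
	/* caller guarantees the offset lies inside this extent */
	return se->start_block + (offset - se->start_page);
}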

/*
 * Max bad pages in the new format..
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
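
/*
 * Worked example (assuming the union layout above and a 4K PAGE_SIZE):
 * __swapoffset(magic.magic)  = 4096 - 10 = 4086,
 * __swapoffset(info.badpages) = 1024 + 3*4 + 16 + 16 + 117*4 = 1536,
 * so MAX_SWAP_BADPAGES = (4086 - 1536) / sizeof(int) = 637 bad pages.
 */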

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
					/* add others here before... */
	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
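
/*
 * Illustrative sketch: each swap_map byte holds the swapcache flag in bit 6
 * and the map count below it; COUNT_CONTINUED means the rest of the count
 * lives in a continuation page.  The masking below mirrors what the
 * swap_count() helper in mm/swapfile.c does; swap_map_example_count() is
 * only an example, not a kernel API.
 */
static inline unsigned char swap_map_example_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* drop the SWAP_HAS_CACHE flag */
}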

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores next cluster if the cluster is free or cluster usage
 * counter otherwise. The flags field determines if a cluster is free. This is
 * protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
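
/*
 * Illustrative sketch (hypothetical helper): with the bitfields above, a
 * cluster's state is read under swap_info_struct.lock; the real accessors
 * (cluster_is_free() and friends) live in mm/swapfile.c.
 */
static inline int swap_cluster_example_is_free(const struct swap_cluster_info *ci)
{
	return ci->flags & CLUSTER_FLAG_FREE;
}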

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entry from
 * its own cluster and swapout sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next;		/* Likely next allocation offset */
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	struct plist_node avail_list;	/* entry in swap_avail_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_info free_cluster_head; /* free cluster list head */
	struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags needs
					 * holding this lock and swap_lock. If
					 * both locks need to be held, take
					 * swap_lock first.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
	struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
};
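
/*
 * Illustrative sketch of the locking rule in the comment above (si is a
 * hypothetical pointer to a swap_info_struct; swap_lock lives in
 * mm/swapfile.c):
 *
 *	spin_lock(&swap_lock);
 *	spin_lock(&si->lock);
 *	si->flags |= SWP_WRITEOK;	<- changing flags needs both locks
 *	spin_unlock(&si->lock);
 *	spin_unlock(&swap_lock);
 */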

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
extern struct list_lru workingset_shadow_nodes;

static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
{
	return node->count & RADIX_TREE_COUNT_MASK;
}

static inline void workingset_node_pages_inc(struct radix_tree_node *node)
{
	node->count++;
}

static inline void workingset_node_pages_dec(struct radix_tree_node *node)
{
	node->count--;
}

static inline unsigned int workingset_node_shadows(struct radix_tree_node *node)
{
	return node->count >> RADIX_TREE_COUNT_SHIFT;
}

static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
{
	node->count += 1U << RADIX_TREE_COUNT_SHIFT;
}

static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
{
	node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
}
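
/*
 * Illustrative sketch (hypothetical helper): node->count packs the number of
 * present pages in the low RADIX_TREE_COUNT_SHIFT bits and the number of
 * shadow entries above them, so the two can simply be summed.
 */
static inline unsigned int workingset_node_example_total(struct radix_tree_node *node)
{
	return workingset_node_pages(node) + workingset_node_shadows(node);
}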

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

#else /* CONFIG_SWAP */

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), false);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page, struct list_head *list)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_mapcount) \
	(page_trans_huge_mapcount(page, total_mapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */