/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
struct notifier_block;
#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
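
/*
 * Illustrative example (not part of this header): a swapon(2) caller
 * requesting priority 5 plus per-page-cluster discard would encode
 *
 *	swap_flags = SWAP_FLAG_PREFER | (5 << SWAP_FLAG_PRIO_SHIFT) |
 *		     SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_PAGES;
 *
 * Flag bits outside SWAP_FLAGS_VALID are rejected by sys_swapon().
 */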
#ifdef __KERNEL__

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}
/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
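
/*
 * Worked example (illustrative): with the 5/27 split described above, a
 * 32-bit pgoff_t leaves 1 << 27 page offsets per swap type, i.e. 512 GiB
 * of swap with 4 KiB pages, and 1 << 5 == 32 raw type values, some of
 * which are reserved for the special entries defined below.
 */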
/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */
/*
 * PTE markers are used to persist information onto PTEs that otherwise
 * should be a none pte.  As its name "PTE" hints, it should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER		(MAX_SWAPFILES + SWP_HWPOISON_NUM + \
				 SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by CPU, so that we can
 * migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif
/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (part of an) anonymous page that is mapped writable is exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif
/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	/* pages reclaimed outside of LRU-based reclaim */
	unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};
/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed += pages;
}
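
/*
 * Usage sketch (illustrative; my_free_object_pages() is a hypothetical
 * helper): a shrinker that frees pages without going through the LRU
 * reports them so vmscan can credit the progress:
 *
 *	freed = my_free_object_pages();
 *	mm_account_reclaimed_pages(freed);
 */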
struct address_space;
struct writeback_control;
/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file).  Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
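
/*
 * Worked example (illustrative): with 4 KiB pages, magic.magic lives at
 * offset PAGE_SIZE - 10 == 4086 and info.badpages at offset 1536, so the
 * header holds at most (4086 - 1536) / sizeof(int) == 637 bad pages.
 */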
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */
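
/*
 * Encoding example (illustrative): the low six bits of a swap_map byte
 * carry the map count, so 0x41 == 0x01 | SWAP_HAS_CACHE means "mapped
 * once and present in the swap cache".  Once a count would exceed
 * SWAP_MAP_MAX (62), COUNT_CONTINUED is set and the excess spills into
 * continuation pages, each byte of which holds up to SWAP_CONT_MAX (127).
 */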
/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The flags field determines if a cluster is free. This is
 * protected by cluster lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * other than list, and swap_info_struct->swap_map
				 * elements corresponding to the swap cluster.
				 */
	u16 count;
	u8 flags;
	u8 order;
	struct list_head list;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */
#define CLUSTER_FLAG_FRAG 4 /* This cluster is on frag list */
#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */
/*
 * The first page in the swap file is the swap header, which is always marked
 * bad to prevent it from being allocated as an entry. This also prevents the
 * cluster to which it belongs from being marked free. Therefore 0 is safe to
 * use as a sentinel to indicate next is not valid in percpu_cluster.
 */
#define SWAP_NEXT_INVALID	0
#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS		(PMD_ORDER + 1)
#else
#define SWAP_NR_ORDERS		1
#endif
/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entry from
 * its own cluster and swapout sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};
/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned long *zeromap;		/* kvmalloc'ed bitmap to track zero pages */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct list_head free_clusters; /* free clusters list */
	struct list_head full_clusters; /* full clusters list */
	struct list_head nonfull_clusters[SWAP_NR_ORDERS];
					/* list of clusters that contain at least one free slot */
	struct list_head frag_clusters[SWAP_NR_ORDERS];
					/* list of clusters that are fragmented or contended */
	unsigned int frag_cluster_nr[SWAP_NR_ORDERS];
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	struct completion comp;		/* seldom referenced */
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags need
					 * hold this lock and swap_lock. If
					 * both locks need hold, hold swap_lock
					 * first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct work_struct reclaim_work; /* reclaim worker */
	struct list_head discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last as the number of the
					  * array is nr_node_ids, which is not
					  * a fixed value so have to allocate
					  * dynamically.
					  * And it has to be an array so that
					  * plist_for_each_* can work.
					  */
};
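
/*
 * Lock-ordering sketch (illustrative), per the comment on "lock" above:
 * when both the global swap_lock and si->lock are needed, take swap_lock
 * first:
 *
 *	spin_lock(&swap_lock);
 *	spin_lock(&si->lock);
 *	...
 *	spin_unlock(&si->lock);
 *	spin_unlock(&swap_lock);
 */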
static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio->swap;

	entry.val += folio_page_idx(folio, page);
	return entry;
}
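
/*
 * Example (illustrative): if a large folio in the swap cache occupies
 * swap offsets starting at N, then for its third subpage this returns
 * folio->swap with .val advanced to N + 2.
 */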
/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
				bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file,
		   unsigned int nr_io, unsigned int nr_rotated);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);
extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}
extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
#define MIN_SWAPPINESS 0
#define MAX_SWAPPINESS 200
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options,
						  int *swappiness);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif
static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}
void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);
#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);
static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

void free_swap_cache(struct folio *folio);
void free_page_and_swap_cache(struct page *);
void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
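
/*
 * Note (illustrative): nr_swap_pages counts the *free* swap slots
 * remaining, so "free * 2 < total" holds exactly when more than half
 * of the configured swap space is in use.
 */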
static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}
extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t entry, int nr);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);
static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
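
/*
 * Usage sketch (illustrative): get_swap_device() takes a reference on
 * si->users so the device cannot be swapped off while in use, and
 * put_swap_device() drops it:
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (si) {
 *		... operate on si and entry ...
 *		put_swap_device(si);
 *	}
 */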
#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));
static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
}

static inline void free_swap_cache(struct folio *folio)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
	return 0;
}

static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;

	entry.val = 0;
	return entry;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */
static inline void free_swap_and_cache(swp_entry_t entry)
{
	free_swap_and_cache_nr(entry, 1);
}

static inline void swap_free(swp_entry_t entry)
{
	swap_free_nr(entry, 1);
}
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return READ_ONCE(vm_swappiness);

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return READ_ONCE(vm_swappiness);

	return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return READ_ONCE(vm_swappiness);
}
#endif
#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return;
	__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif
#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */