/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>
/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - \
			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)
/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
			(offset & SWP_OFFSET_MASK(ret));
	return ret;
}
/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT(entry));
}
/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK(entry);
}
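/*
 * Example (illustrative only, not from the original source): an entry built
 * with swp_entry(2, 0x1234) yields swp_type() == 2 and swp_offset() == 0x1234;
 * the type lives in the high-order bits and the offset in the low-order bits,
 * as described in the layout comment above.
 */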
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
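/*
 * Convert between a swp_entry_t and the exceptional-entry encoding used when
 * shmem/tmpfs stores swap entries in the page cache radix tree: the value is
 * shifted up by RADIX_TREE_EXCEPTIONAL_SHIFT and tagged with
 * RADIX_TREE_EXCEPTIONAL_ENTRY so it cannot be mistaken for a page pointer.
 */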
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
	return entry;
}
static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	unsigned long value;

	value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
	return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
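/*
 * Device private entries: swap entries whose type is SWP_DEVICE_READ or
 * SWP_DEVICE_WRITE and whose offset is the pfn of a device-private page.
 * They stand in for ptes of memory that currently lives on a device; a CPU
 * access faults into device_private_entry_fault().
 */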
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}
static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}
static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}
static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}
static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}
static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}
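/*
 * Fault handler for CPU access to a device private entry; typically the
 * backing driver migrates the data back to system memory.
 */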
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp);
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline int device_private_entry_fault(struct vm_area_struct *vma,
				     unsigned long addr,
				     swp_entry_t entry,
				     unsigned int flags,
				     pmd_t *pmdp)
{
	return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */
#ifdef CONFIG_MIGRATION
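/*
 * Migration entries: swap entries whose type is SWP_MIGRATION_READ or
 * SWP_MIGRATION_WRITE and whose offset is the pfn of the page being migrated.
 * They temporarily replace present ptes while a page is under migration, so
 * that anyone who faults on one waits for the migration to finish.
 */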
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}
static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}
static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}
static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}
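/*
 * Callers use these to wait until the migration entry has been removed,
 * typically from the fault path, before retrying the access.
 */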
extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					 unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif
struct page_vma_mapped_walk;
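/*
 * pmd-level migration entries are used when migrating transparent huge pages
 * without splitting them (CONFIG_ARCH_ENABLE_THP_MIGRATION).
 */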
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}
static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif
#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;
/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}
static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}
static inline bool test_set_page_hwpoison(struct page *page)
{
	return TestSetPageHWPoison(page);
}
static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}
#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return false;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif
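/*
 * A "non swap" entry is one of the special entries above (migration, device
 * private, hwpoison): its type lies outside the range of real swap files.
 */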
#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif

#endif /* _LINUX_SWAPOPS_H */