/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
typedef void free_folio_t(struct folio *folio, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_UNMAP		1

/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 * The VM calls this function to prepare the page to be moved. The page
 * is locked and the driver should not unlock it. The driver should
 * return ``true`` if the page is movable and ``false`` if it is not
 * currently movable. After this function returns, the VM uses the
 * page->lru field, so the driver must preserve any information which
 * is usually stored here.
 *
 * @migrate_page:
 * After isolation, the VM calls this function with the isolated
 * @src page. The driver should copy the contents of the
 * @src page to the @dst page and set up the fields of @dst page.
 * Both pages are locked.
 * If page migration is successful, the driver should call
 * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
 * If the driver cannot migrate the page at the moment, it can return
 * -EAGAIN. The VM interprets this as a temporary migration failure and
 * will retry it later. Any other error value is a permanent migration
 * failure and migration will not be retried.
 * The driver shouldn't touch the @src->lru field while in the
 * migrate_page() function. It may write to @dst->lru.
 *
 * @putback_page:
 * If migration fails on the isolated page, the VM informs the driver
 * that the page is no longer a candidate for migration by calling
 * this function. The driver should put the isolated page back into
 * its own data structure.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode);
	void (*putback_page)(struct page *);
};
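
/*
 * Example: a minimal sketch of a driver wiring up these callbacks. The
 * my_*() helpers and struct my_dev are hypothetical, not part of this
 * API; real users include zsmalloc and the balloon driver:
 *
 *	static bool my_isolate_page(struct page *page, isolate_mode_t mode)
 *	{
 *		struct my_dev *dev = my_page_to_dev(page);
 *
 *		spin_lock(&dev->lock);
 *		list_del_init(&page->lru);	// VM owns page->lru from here on
 *		spin_unlock(&dev->lock);
 *		return true;
 *	}
 *
 *	static int my_migrate_page(struct page *dst, struct page *src,
 *			enum migrate_mode mode)
 *	{
 *		copy_highpage(dst, src);	// move contents to the new page
 *		my_retarget_metadata(src, dst);	// update driver bookkeeping
 *		__ClearPageMovable(src);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 *
 *	static void my_putback_page(struct page *page)
 *	{
 *		struct my_dev *dev = my_page_to_dev(page);
 *
 *		spin_lock(&dev->lock);
 *		list_add(&page->lru, &dev->pages);	// migration failed
 *		spin_unlock(&dev->lock);
 *	}
 *
 *	static const struct movable_operations my_movable_ops = {
 *		.isolate_page	= my_isolate_page,
 *		.migrate_page	= my_migrate_page,
 *		.putback_page	= my_putback_page,
 *	};
 *
 * Each page is then registered, while locked, with
 * __SetPageMovable(page, &my_movable_ops).
 */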

/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

void putback_movable_pages(struct list_head *l);
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
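
/*
 * Example: a sketch of the common calling pattern, modelled on callers
 * such as memory offlining. The mtc fields shown here are defined
 * privately in mm/internal.h, so treat this as illustrative:
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid = nid,				// preferred target node
 *		.gfp_mask = GFP_USER | __GFP_MOVABLE,
 *	};
 *
 *	// ... isolate folios onto &pagelist ...
 *	if (migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			(unsigned long)&mtc, MIGRATE_SYNC,
 *			MR_MEMORY_HOTPLUG, NULL))
 *		putback_movable_pages(&pagelist);	// return leftovers
 */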

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
		__releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);

#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_folio_t new,
		free_folio_t free, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	return -ENOSYS;
}
static inline struct folio *alloc_migration_target(struct folio *src,
		unsigned long private)
{
	return NULL;
}
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	return false;
}
static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
	return false;
}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
void __ClearPageMovable(struct page *page);
#else
static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
		const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

static inline bool folio_test_movable(struct folio *folio)
{
	return PageMovable(&folio->page);
}

static inline
const struct movable_operations *folio_movable_ops(struct folio *folio)
{
	VM_BUG_ON(!__folio_test_movable(folio));

	return (const struct movable_operations *)
		((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE);
}

static inline
const struct movable_operations *page_movable_ops(struct page *page)
{
	VM_BUG_ON(!__PageMovable(page));

	return (const struct movable_operations *)
		((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
}
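
/*
 * The two helpers above work because __SetPageMovable() stores the ops
 * pointer in page->mapping with the PAGE_MAPPING_MOVABLE bit set, so it
 * can be recovered by subtracting the flag. Core migration code then
 * dispatches roughly like this (sketch):
 *
 *	if (__folio_test_movable(folio))
 *		rc = folio_movable_ops(folio)->migrate_page(&dst->page,
 *				&folio->page, mode);
 */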

#ifdef CONFIG_NUMA_BALANCING
int migrate_misplaced_folio_prepare(struct folio *folio,
		struct vm_area_struct *vma, int node);
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
		int node);
#else
static inline int migrate_misplaced_folio_prepare(struct folio *folio,
		struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline int migrate_misplaced_folio(struct folio *folio,
		struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */
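
/*
 * Example: a sketch of how a NUMA hinting fault might use this pair;
 * the exact fault-path plumbing is simplified here:
 *
 *	if (migrate_misplaced_folio_prepare(folio, vma, target_nid))
 *		goto out;	// nonzero: can't isolate the folio right now
 *	migrate_misplaced_folio(folio, vma, target_nid);
 */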

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store both the physical address and the flags below. So
 * far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
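
/*
 * Example: each src/dst array slot packs a PFN plus flag bits. A driver
 * offering a writable page for migration, and decoding a slot later,
 * would do roughly (illustrative):
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_WRITE;
 *	...
 *	struct page *page = migrate_pfn_to_page(mpfn);	// NULL unless VALID set
 */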

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};

struct migrate_vma {
	struct vm_area_struct	*vma;

	/*
	 * Both src and dst array must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after
	 * migrate_vma_setup() returns, and must not modify the dst array
	 * after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;

	/*
	 * Set to vmf->page if this is being called to migrate a page as part of
	 * a migrate_to_ram() callback.
	 */
	struct page		*fault_page;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages);
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages);
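
/*
 * Example: the three-phase device migration flow, sketched for a driver
 * migrating NPAGES of system memory into device memory. my_owner and the
 * allocation/copy step are hypothetical:
 *
 *	unsigned long src_pfns[NPAGES], dst_pfns[NPAGES];
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= start + NPAGES * PAGE_SIZE,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.pgmap_owner	= my_owner,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	// For each slot with MIGRATE_PFN_MIGRATE set: allocate a device
 *	// page, copy the data, store migrate_pfn(new_pfn) into args.dst.
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 */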

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */