/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private,
				int **reason);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success.
 */
#define MIGRATEPAGE_SUCCESS		0
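
/*
 * Example (illustrative, not mandated by this header): an address space
 * whose pages carry no fs-private state can implement ->migratepage() by
 * delegating directly to migrate_page(), declared below, which returns
 * MIGRATEPAGE_SUCCESS or a negative errno. "example_aops" is a
 * hypothetical name:
 *
 *	static const struct address_space_operations example_aops = {
 *		.migratepage	= migrate_page,
 *	};
 */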

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern char *migrate_reason_names[MR_TYPES];

static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (thp_migration_supported() && PageTransHuge(page)) {
		order = HPAGE_PMD_ORDER;
		gfp_mask |= GFP_TRANSHUGE;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}
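
/*
 * Example (illustrative sketch): new_page_nodemask() is a convenient
 * backend for a new_page_t allocation callback. The callback below is
 * hypothetical; it targets the node a caller packed into @private and
 * passes a NULL nodemask, i.e. no node restriction:
 *
 *	static struct page *new_node_page(struct page *page,
 *					  unsigned long private, int **result)
 *	{
 *		return new_page_nodemask(page, (int)private, NULL);
 *	}
 */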

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);
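
/*
 * Example (illustrative sketch): the usual calling pattern is to isolate
 * pages onto a private list, hand the list to migrate_pages() together
 * with an allocation callback (such as the hypothetical new_node_page()
 * above), and put back whatever could not be migrated. "pagelist" and
 * "nid" are hypothetical:
 *
 *	LIST_HEAD(pagelist);
 *	int ret;
 *
 *	(isolate candidate pages onto &pagelist, then:)
 *
 *	ret = migrate_pages(&pagelist, new_node_page, NULL,
 *			    (unsigned long)nid, MIGRATE_SYNC, MR_SYSCALL);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 */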

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where unsigned long is only 32 bits wide
 * and so might not have enough bits to store both the physical address and
 * the flags below. So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_DEVICE	(1UL << 4)
#define MIGRATE_PFN_ERROR	(1UL << 5)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
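
/*
 * Example (illustrative): encoding a migratable, writable system page
 * into a src array entry and decoding it back. "pfn" is a hypothetical
 * page frame number:
 *
 *	unsigned long mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE |
 *			     MIGRATE_PFN_WRITE;
 *	struct page *page = migrate_pfn_to_page(mpfn);	(== pfn_to_page(pfn))
 */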

/*
 * struct migrate_vma_ops - migrate operation callbacks
 *
 * @alloc_and_copy: alloc destination memory and copy source memory to it
 * @finalize_and_map: allow caller to map the successfully migrated pages
 *
 *
 * The alloc_and_copy() callback happens once all source pages have been
 * locked, unmapped and checked (to see whether they are pinned or not). All
 * pages that can be migrated will have an entry in the src array set with the
 * pfn value of the page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flags set (other flags might be set but should be ignored by the callback).
 *
 * The alloc_and_copy() callback can then allocate destination memory and copy
 * source memory to it for all those entries (i.e. those with both the
 * MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set). Once these are
 * allocated and copied, the callback must update each corresponding entry in
 * the dst array with the pfn value of the destination page and with the
 * MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set (destination pages must
 * have their struct pages locked, via lock_page()).
 *
 * At this point the alloc_and_copy() callback is done and returns.
 *
 * Note that the callback does not have to migrate all the pages that are
 * marked with the MIGRATE_PFN_MIGRATE flag in the src array, unless this is
 * a migration from device memory to system memory (i.e. the
 * MIGRATE_PFN_DEVICE flag is also set in the src array entry). If the device
 * driver cannot migrate a device page back to system memory, then it must
 * set the corresponding dst array entry to MIGRATE_PFN_ERROR. This will
 * trigger a SIGBUS if the CPU tries to access any of the virtual addresses
 * originally backed by this page. Because a SIGBUS is such a severe result
 * for the userspace process, the device driver should avoid setting
 * MIGRATE_PFN_ERROR unless it is really in an unrecoverable state.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag in the corresponding source
 * array entry, thus allowing the device driver to allocate device memory for
 * those unbacked virtual addresses. For this the device driver simply has to
 * allocate device memory and properly set the destination entry as for a
 * regular migration. Note that this can still fail, and thus the device
 * driver must check inside the finalize_and_map() callback whether the
 * migration was successful for those entries, just as for a regular
 * migration.
 *
 * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
 * OR BAD THINGS WILL HAPPEN!
 *
 *
 * The finalize_and_map() callback happens after struct page migration from
 * source to destination (destination struct pages are the struct pages for
 * the memory allocated by the alloc_and_copy() callback). Migration can
 * fail, and thus the finalize_and_map() callback allows the driver to
 * inspect which pages were successfully migrated, and which were not.
 * Successfully migrated pages will have the MIGRATE_PFN_MIGRATE flag set for
 * their src array entry.
 *
 * It is safe to update the device page table from within the
 * finalize_and_map() callback because both the destination and source pages
 * are still locked, and the mmap_sem is held in read mode (hence no one can
 * unmap the range being migrated).
 *
 * Once the callback is done cleaning up things and updating its page table
 * (if it chose to do so, this is not an obligation) then it returns. At this
 * point, the HMM core will finish up the final steps, and the migration is
 * complete.
 *
 * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST
 * ARRAY ENTRIES OR BAD THINGS WILL HAPPEN!
 */
struct migrate_vma_ops {
	void (*alloc_and_copy)(struct vm_area_struct *vma,
			       const unsigned long *src,
			       unsigned long *dst,
			       unsigned long start,
			       unsigned long end,
			       void *private);
	void (*finalize_and_map)(struct vm_area_struct *vma,
				 const unsigned long *src,
				 const unsigned long *dst,
				 unsigned long start,
				 unsigned long end,
				 void *private);
};
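
/*
 * Example (illustrative sketch of the protocol described above, not a
 * definitive implementation): a driver migrating system pages, assuming a
 * PAGE_SIZE-aligned range inside a single vma. All "dmem_*" names are
 * hypothetical:
 *
 *	static void dmem_alloc_and_copy(struct vm_area_struct *vma,
 *					const unsigned long *src,
 *					unsigned long *dst,
 *					unsigned long start,
 *					unsigned long end,
 *					void *private)
 *	{
 *		unsigned long i, npages = (end - start) >> PAGE_SHIFT;
 *
 *		for (i = 0; i < npages; i++) {
 *			struct page *spage = migrate_pfn_to_page(src[i]);
 *			struct page *dpage;
 *
 *			if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;
 *			dpage = alloc_page(GFP_HIGHUSER_MOVABLE);
 *			if (!dpage)
 *				continue;	(entry is skipped, not an error)
 *			lock_page(dpage);
 *			if (spage)
 *				copy_highpage(dpage, spage);
 *			dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *				 MIGRATE_PFN_LOCKED;
 *		}
 *	}
 *
 *	static void dmem_finalize_and_map(struct vm_area_struct *vma,
 *					  const unsigned long *src,
 *					  const unsigned long *dst,
 *					  unsigned long start,
 *					  unsigned long end,
 *					  void *private)
 *	{
 *		(check src[i] & MIGRATE_PFN_MIGRATE to learn what migrated,
 *		 and update the device page table here if needed)
 *	}
 *
 *	static const struct migrate_vma_ops dmem_migrate_ops = {
 *		.alloc_and_copy		= dmem_alloc_and_copy,
 *		.finalize_and_map	= dmem_finalize_and_map,
 *	};
 *
 *	err = migrate_vma(&dmem_migrate_ops, vma, start, end, src, dst, NULL);
 */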

#if defined(CONFIG_MIGRATE_VMA_HELPER)
int migrate_vma(const struct migrate_vma_ops *ops,
		struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end,
		unsigned long *src,
		unsigned long *dst,
		void *private);
#else
static inline int migrate_vma(const struct migrate_vma_ops *ops,
			      struct vm_area_struct *vma,
			      unsigned long start,
			      unsigned long end,
			      unsigned long *src,
			      unsigned long *dst,
			      void *private)
{
	return -EINVAL;
}
#endif /* IS_ENABLED(CONFIG_MIGRATE_VMA_HELPER) */

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */