/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>
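
/*
 * Allocation/free callbacks used by migrate_pages(); "private" is an
 * opaque value passed through unchanged from the migrate_pages() caller
 * to the callback.
 */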
typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 *  - negative errno on page migration failure;
 *  - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];
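
/*
 * Allocate a migration target matching the source page: hugetlb pages get
 * a page from the same hstate, transparent huge pages get a THP, and
 * highmem or ZONE_MOVABLE sources may land in highmem. Allocation prefers
 * @preferred_nid but may fall back to any node in @nodemask.
 */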
static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (PageTransHuge(page)) {
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);
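
/*
 * migrate_prep() drains the per-CPU LRU pagevecs on all CPUs so that
 * pages can be isolated with isolate_lru_page(); migrate_prep_local()
 * drains only the calling CPU.
 */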
extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);

#else
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store both the physical address and the flags below. So
 * far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6
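
/*
 * A migrate PFN (mpfn) packs the page frame number above MIGRATE_PFN_SHIFT
 * and keeps the flags in the low bits; migrate_pfn() and
 * migrate_pfn_to_page() below perform the round trip.
 */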
static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both src and dst array must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and the caller must not change the dst array
	 * after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
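
/*
 * A minimal sketch of the expected calling sequence for the three
 * functions above. The allocate-and-copy step in the middle is entirely
 * up to the caller (typically a device driver), so the helper
 * alloc_dst_page_and_copy() below is hypothetical:
 *
 *	struct migrate_vma args = {
 *		.vma	= vma,
 *		.src	= src_pfns,
 *		.dst	= dst_pfns,
 *		.start	= start,
 *		.end	= end,
 *	};
 *	unsigned long i;
 *	struct page *dpage;
 *
 *	if (migrate_vma_setup(&args))
 *		return;
 *	for (i = 0; i < args.npages; i++) {
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = alloc_dst_page_and_copy(args.src[i]);
 *		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
 *	}
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 */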

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */