mm, memory_hotplug: try to migrate full pfn range
[linux/fpc-iii.git] / kernel / memremap.c
blob 0d5603d76c37859d496b2bf016cf04355ebd23ad
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
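/*
 * XArray mapping device pfns to their struct dev_pagemap, used for the
 * slow-path lookup in get_dev_pagemap() under RCU.
 */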
static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
			unsigned long addr,
			swp_entry_t entry,
			unsigned int flags,
			pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h.
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
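/*
 * Clear the pgmap_array entries covering @res and wait for any concurrent
 * RCU-protected lookups (see get_dev_pagemap()) to drain.
 */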
static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}
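/*
 * First device pfn covered by @pgmap, skipping the pfns at the base of the
 * range that back the altmap (on-device struct page storage) when one is in
 * use.
 */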
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}
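/* One past the last device pfn covered by @pgmap. */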
static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}
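/*
 * Advance to the next pfn, yielding the CPU every 1024 pfns via
 * cond_resched() so that long walks over device pfn ranges stay
 * preemption friendly.
 */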
static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
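/*
 * Teardown path, run as a devm action when the hosting device goes away:
 * kill the percpu ref, drop the per-page references taken in
 * devm_memremap_pages(), and unwind the arch mapping, pfn tracking and
 * pgmap_array state.
 */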
static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;
	int nid;

	pgmap->kill(pgmap->ref);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	nid = page_to_nid(pfn_to_page(align_start >> PAGE_SHIFT));

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = align_start >> PAGE_SHIFT;
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
				align_size >> PAGE_SHIFT, NULL);
	} else {
		arch_remove_memory(nid, align_start, align_size,
				pgmap->altmap_valid ? &pgmap->altmap : NULL);
		kasan_remove_zero_shadow(__va(align_start), align_size);
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_array_delete(res);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true
 *
 * 3/ pgmap->ref must be 'live' on entry and will be killed at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;

	if (!pgmap->ref || !pgmap->kill)
		return ERR_PTR(-EINVAL);

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}

	pgmap->dev = dev;

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible, so we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL, false);
	} else {
		error = kasan_add_zero_shadow(__va(align_start), align_size);
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, align_start, align_size, altmap,
				false);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(align_start), align_size);
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	pgmap->kill(pgmap->ref);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
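/*
 * Illustrative only, not part of this file: a minimal sketch of how a caller
 * might satisfy the contract documented above. The "foo" context,
 * foo_ref_release(), foo_ref_kill() and foo_map() are hypothetical names,
 * *foo is assumed to be zero-initialized, MEMORY_DEVICE_FS_DAX stands in for
 * whichever type applies, and teardown synchronization (waiting for the ref
 * to drain before the device goes away) is omitted. The point is that res,
 * ref, kill and type are filled in, the ref is live, and the return value is
 * checked with IS_ERR().
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *		struct dev_pagemap pgmap;
 *	};
 *
 *	static void foo_ref_release(struct percpu_ref *ref)
 *	{
 *	}
 *
 *	static void foo_ref_kill(struct percpu_ref *ref)
 *	{
 *		percpu_ref_kill(ref);
 *	}
 *
 *	static int foo_map(struct device *dev, struct foo *foo,
 *			   struct resource *res)
 *	{
 *		void *addr;
 *		int rc;
 *
 *		rc = percpu_ref_init(&foo->ref, foo_ref_release, 0,
 *				     GFP_KERNEL);
 *		if (rc)
 *			return rc;
 *
 *		foo->pgmap.res = *res;
 *		foo->pgmap.ref = &foo->ref;
 *		foo->pgmap.kill = foo_ref_kill;
 *		foo->pgmap.type = MEMORY_DEVICE_FS_DAX;
 *
 *		addr = devm_memremap_pages(dev, &foo->pgmap);
 *		if (IS_ERR(addr))
 *			return PTR_ERR(addr);
 *		return 0;
 *	}
 */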
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}
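/* Return @nr_pfns previously handed out from the altmap back to its pool. */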
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
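/*
 * Illustrative only, not part of this file: the cached-lookup pattern the
 * kernel-doc above describes, sketched as a hypothetical walk over a pfn
 * range. Passing the previously returned pgmap back in lets consecutive
 * pfns that fall in the same mapping skip the slow-path lookup:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *	unsigned long pfn;
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;
 *		... use pfn_to_page(pfn) while the reference is held ...
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */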
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);
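/*
 * Invoked from put_page() for pages belonging to a devmap-managed pgmap when
 * devmap_managed_key is enabled: once the last user reference is dropped
 * (refcount reaching 1, the idle state for these pages) the page is handed
 * to the pgmap's ->page_free() callback rather than the page allocator.
 */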
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If refcount is 1 then page is freed and refcount is stable as nobody
	 * holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */