/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

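/*
 * Illustrative sketch, not part of the original file: one way a namespace
 * provider might use memremap_compat_align() to reject layouts that could
 * not later be switched to a memremap_pages() based mode. The function
 * name and calling context below are hypothetical.
 */
#if 0
static int example_validate_namespace(resource_size_t start,
				       resource_size_t size)
{
	unsigned long align = memremap_compat_align();

	/* PAGE_SIZE alignment is not enough; require the arch minimum */
	if (!IS_ALIGNED(start, align) || !IS_ALIGNED(size, align))
		return -EINVAL;
	return 0;
}
#endif
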
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;

static void devmap_managed_enable_put(void)
{
	if (atomic_dec_and_test(&devmap_managed_enable))
		static_branch_disable(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE &&
	    (!pgmap->ops || !pgmap->ops->page_free)) {
		WARN(1, "Missing page_free method\n");
		return -EINVAL;
	}

	if (atomic_inc_return(&devmap_managed_enable) == 1)
		static_branch_enable(&devmap_managed_key);
	return 0;
}

#else
static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	return -EINVAL;
}
static void devmap_managed_enable_put(void)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	return PHYS_PFN(pgmap->res.start) +
		vmem_altmap_offset(pgmap_altmap(pgmap));
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
	/*
	 * Undo the pgmap ref assignment for the internal case as the
	 * caller may re-enable the same pgmap.
	 */
	if (pgmap->ref == &pgmap->internal_ref)
		pgmap->ref = NULL;
}

void memunmap_pages(struct dev_pagemap *pgmap)
{
	struct resource *res = &pgmap->res;
	struct page *first_page;
	unsigned long pfn;
	int nid;

	dev_pagemap_kill(pgmap);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	/* make sure to access a memmap that was actually initialized */
	first_page = pfn_to_page(pfn_first(pgmap));

	/* pages are dead and unused, undo the arch mapping */
	nid = page_to_nid(first_page);

	mem_hotplug_begin();
	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(res->start),
				   PHYS_PFN(resource_size(res)));
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		__remove_pages(PHYS_PFN(res->start),
			       PHYS_PFN(resource_size(res)), NULL);
	} else {
		arch_remove_memory(nid, res->start, resource_size(res),
				pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(res->start), resource_size(res));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
	pgmap_array_delete(res);
	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put();
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}

/*
 * Not device managed version of devm_memremap_pages(), undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	struct mhp_params params = {
		/*
		 * We do not want any optional features, only our own memmap.
		 */
		.altmap = pgmap_altmap(pgmap),
		.pgprot = PAGE_KERNEL,
	};
	int error, is_ram;
	bool need_devmap_managed = true;

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->owner) {
			WARN(1, "Missing owner\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_DEVDAX:
		need_devmap_managed = false;
		break;
	case MEMORY_DEVICE_PCI_P2PDMA:
		params.pgprot = pgprot_noncached(params.pgprot);
		need_devmap_managed = false;
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (need_devmap_managed) {
		error = devmap_managed_enable_get(pgmap);
		if (error)
			return ERR_PTR(error);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	is_ram = region_intersects(res->start, resource_size(res),
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(res->start),
			0, resource_size(res));
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. Moreover,
	 * the device memory is inaccessible, so we do not want to create a
	 * linear mapping for the memory like arch_add_memory() would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), &params);
	} else {
		error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, res->start, resource_size(res),
					&params);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), params.altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
	return __va(res->start);

err_add_memory:
	kasan_remove_zero_shadow(__va(res->start), resource_size(res));
err_kasan:
	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
err_pfn_remap:
	pgmap_array_delete(res);
err_array:
	dev_pagemap_kill(pgmap);
	dev_pagemap_cleanup(pgmap);
	devmap_managed_enable_put();
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(memremap_pages);

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function.
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

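/*
 * Illustrative sketch, not part of the original file: a minimal,
 * hypothetical driver-side use of devm_memremap_pages() for a
 * DEVDAX-style range. The function name and calling convention are
 * assumptions for the example only.
 */
#if 0
static void *example_map_device_pages(struct device *dev,
				       struct dev_pagemap *pgmap,
				       struct resource *res)
{
	/* at a minimum, res and type must be set before the call */
	pgmap->res = *res;
	pgmap->type = MEMORY_DEVICE_DEVDAX;

	/*
	 * On success this returns the kernel virtual address of the
	 * mapped range; teardown is tied to @dev via devm.
	 */
	return devm_memremap_pages(dev, pgmap);
}
#endif
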
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to lookup page_map
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

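/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that walks a range of pfns while reusing a cached pgmap
 * reference, which is the intended calling pattern for
 * get_dev_pagemap()/put_dev_pagemap(). The function name is an
 * assumption for the example only.
 */
#if 0
static void example_walk_device_pfns(unsigned long start, unsigned long nr)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start; pfn < start + nr; pfn++) {
		/* returns the cached pgmap as-is if it still covers @pfn */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			continue;
		/* ... operate on pfn_to_page(pfn) here ... */
	}
	if (pgmap)
		put_dev_pagemap(pgmap);
}
#endif
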
#ifdef CONFIG_DEV_PAGEMAP_OPS
void free_devmap_managed_page(struct page *page)
{
	/* notify page idle for dax */
	if (!is_device_private_page(page)) {
		wake_up_var(&page->_refcount);
		return;
	}

	/* Clear Active bit in case of parallel mark_page_accessed */
	__ClearPageActive(page);
	__ClearPageWaiters(page);

	mem_cgroup_uncharge(page);

	/*
	 * When a device_private page is freed, the page->mapping field
	 * may still contain a (stale) mapping value. For example, the
	 * lower bits of page->mapping may still identify the page as an
	 * anonymous page. Ultimately, this entire field is just stale
	 * and wrong, and it will cause errors if not cleared. One
	 * example is:
	 *
	 *  migrate_vma_pages()
	 *    migrate_vma_insert_page()
	 *      page_add_new_anon_rmap()
	 *        __page_set_anon_rmap()
	 *          ...checks page->mapping, via PageAnon(page) call,
	 *            and incorrectly concludes that the page is an
	 *            anonymous page. Therefore, it incorrectly,
	 *            silently fails to set up the new anon rmap.
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
	 * to clear page->mapping.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */