/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK		~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE		(1UL << PA_SECTION_SHIFT)
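/*
 * Illustrative note, not from the original source: device memory is hot-added
 * in whole sparse-memory sections.  Assuming PA_SECTION_SHIFT == 27 (the
 * common x86_64 value, i.e. 128MB sections), a 16MB resource starting at
 * 0x100001000 would be rounded out as:
 *
 *	align_start = 0x100001000 & ~(SECTION_SIZE - 1)            = 0x100000000
 *	align_size  = ALIGN(0x100001000 + 0x1000000, SECTION_SIZE)
 *			- align_start                              = 0x8000000 (128MB)
 *
 * so the arch mapping always covers the full section(s) spanned by the
 * resource, not just the pages the caller asked for.
 */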
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate page back to system memory
	 * so that CPU can access it. This might fail for various reasons
	 * (device issue, device was unsafely unplugged, ...). When such
	 * error conditions happen, the callback must return VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h
	 *
	 * (An illustrative callback sketch follows this #if block.)
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
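/*
 * Illustrative sketch only, not part of this file: a minimal ->page_fault()
 * implementation for a MEMORY_DEVICE_PRIVATE user.  The names my_device and
 * my_device_migrate_to_ram() are hypothetical placeholders, not kernel APIs;
 * the callback signature follows dev_page_fault_t in include/linux/memremap.h.
 *
 *	static vm_fault_t my_devmem_fault(struct vm_area_struct *vma,
 *					  unsigned long addr,
 *					  const struct page *page,
 *					  unsigned int flags,
 *					  pmd_t *pmdp)
 *	{
 *		struct my_device *mdev = page->pgmap->data;
 *
 *		// Migrate the device page back so the CPU can access it.
 *		if (my_device_migrate_to_ram(mdev, vma, addr, page, pmdp))
 *			return VM_FAULT_SIGBUS;
 *		return 0;
 *	}
 *
 * A dev_pagemap owner installs such a handler through the page_fault member
 * before the range is mapped with devm_memremap_pages().
 */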
static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}
static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;

	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
			&pgmap->altmap : NULL);
	kasan_remove_zero_shadow(__va(align_start), align_size);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_array_delete(res);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true
 *
 * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
 *    time (or devm release event). The expected order of events is that ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 *
 * (An illustrative caller sketch follows the function body below.)
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!pgmap->ref)
		return ERR_PTR(-EINVAL);

	pgmap->dev = dev;
	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = kasan_add_zero_shadow(__va(align_start), align_size);
	if (error) {
		mem_hotplug_done();
		goto err_kasan;
	}

	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT, altmap);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));

	devm_add_action(dev, devm_memremap_pages_release, pgmap);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(align_start), align_size);
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
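/*
 * Illustrative caller sketch only, not part of this file: one way a
 * hypothetical driver could satisfy the notes above, initializing res, ref
 * and type before mapping the range.  struct my_device, my_ref_release(),
 * my_devmem_fault() and my_devmem_free() are made-up placeholders.
 *
 *	static void *my_map_device_memory(struct device *dev,
 *					  struct my_device *mdev,
 *					  resource_size_t base,
 *					  resource_size_t size)
 *	{
 *		struct dev_pagemap *pgmap = &mdev->pgmap;
 *		int rc;
 *
 *		pgmap->res.start = base;
 *		pgmap->res.end = base + size - 1;
 *		pgmap->type = MEMORY_DEVICE_PRIVATE;
 *		pgmap->page_fault = my_devmem_fault;
 *		pgmap->page_free = my_devmem_free;
 *		pgmap->data = mdev;
 *
 *		// ref must already be live on entry (note 3/ above).
 *		rc = percpu_ref_init(&mdev->ref, my_ref_release, 0, GFP_KERNEL);
 *		if (rc)
 *			return ERR_PTR(rc);
 *		pgmap->ref = &mdev->ref;
 *
 *		return devm_memremap_pages(dev, pgmap);
 *	}
 *
 * Teardown order matters: percpu_ref_kill(pgmap->ref) must happen before the
 * devm release action runs, and percpu_ref_exit() only after it (note 3/).
 */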
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up page_map
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 *
 * (An illustrative caller sketch follows the function body below.)
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
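/*
 * Illustrative caller sketch only, not from this file: when scanning a run of
 * pfns, the cached-@pgmap convention above avoids an xarray lookup per pfn.
 * The loop body is a hypothetical placeholder.
 *
 *	struct dev_pagemap *pgmap = NULL;
 *	unsigned long pfn;
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;		// pfn is not device memory
 *		// ... operate on pfn_to_page(pfn) ...
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 *
 * The caller ends up holding exactly one live reference at a time; the last
 * one is dropped with put_dev_pagemap() once the scan is done.
 */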
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.  (An illustrative pairing sketch follows the two helpers
 * below.)
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);
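/*
 * Illustrative pairing sketch only, with hypothetical setup hooks: a
 * dev_pagemap user that relies on ->page_free() keeps the enable count
 * elevated for the lifetime of its mapping so that page puts are routed
 * through __put_devmap_managed_page() below.  my_devmem_enable() and
 * my_map_device_memory() are the made-up helpers from the earlier sketches.
 *
 *	static int my_devmem_enable(struct device *dev, struct my_device *mdev)
 *	{
 *		void *addr;
 *
 *		dev_pagemap_get_ops();
 *		addr = my_map_device_memory(dev, mdev, mdev->base, mdev->size);
 *		if (IS_ERR(addr)) {
 *			dev_pagemap_put_ops();
 *			return PTR_ERR(addr);
 *		}
 *		return 0;
 *	}
 *
 * The matching dev_pagemap_put_ops() runs after the mapping is torn down,
 * so the static key stays enabled only while ->page_free() users exist.
 */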
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If refcount is 1 then page is freed and refcount is stable as nobody
	 * holds a reference on the page.  (An illustrative ->page_free()
	 * sketch follows this #ifdef block.)
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */
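/*
 * Illustrative sketch only, not part of this file: a minimal ->page_free()
 * callback for the path above.  my_device and my_device_page_release() are
 * hypothetical placeholders; the signature follows dev_page_free_t in
 * include/linux/memremap.h.
 *
 *	static void my_devmem_free(struct page *page, void *data)
 *	{
 *		struct my_device *mdev = data;
 *
 *		// The page is idle here: return it to the device allocator.
 *		my_device_page_release(mdev, page);
 *	}
 *
 * It is installed through pgmap->page_free (see the caller sketch after
 * devm_memremap_pages() above) and runs once the page's refcount drops back
 * to 1 in __put_devmap_managed_page().
 */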