/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
        return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
        return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
                unsigned long flags)
{
        return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
                unsigned long flags)
{
        unsigned long pfn = PHYS_PFN(offset);

        /* In the simple case just return the existing linear address */
        if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
            arch_memremap_can_ram_remap(offset, size, flags))
                return __va(offset);

        return NULL; /* fallback to arch_memremap_wb */
}
/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *                MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached.  Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
        int is_ram = region_intersects(offset, size,
                                       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
        void *addr = NULL;

        if (!flags)
                return NULL;

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        /* Try all mapping types requested until one returns non-NULL */
        if (flags & MEMREMAP_WB) {
                /*
                 * MEMREMAP_WB is special in that it can be satisfied
                 * from the direct map.  Some archs depend on the
                 * capability of memremap() to autodetect cases where
                 * the requested range is potentially in System RAM.
                 */
                if (is_ram == REGION_INTERSECTS)
                        addr = try_ram_remap(offset, size, flags);
                if (!addr)
                        addr = arch_memremap_wb(offset, size);
        }

        /*
         * If we don't have a mapping yet and other request flags are
         * present then we will be attempting to establish a new virtual
         * address mapping.  Enforce that this mapping is not aliasing
         * System RAM.
         */
        if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
                WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        if (!addr && (flags & MEMREMAP_WT))
                addr = ioremap_wt(offset, size);

        if (!addr && (flags & MEMREMAP_WC))
                addr = ioremap_wc(offset, size);

        return addr;
}
EXPORT_SYMBOL(memremap);
void memunmap(void *addr)
{
        if (is_vmalloc_addr(addr))
                iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
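
/*
 * Example (illustrative sketch; "res" and the surrounding caller context are
 * hypothetical, not part of this file): mapping a side-effect-free memory
 * range as cacheable and tearing it down again.
 *
 *      void *base = memremap(res->start, resource_size(res), MEMREMAP_WB);
 *
 *      if (!base)
 *              return -ENOMEM;
 *      ...access base as ordinary memory...
 *      memunmap(base);
 */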
static void devm_memremap_release(struct device *dev, void *res)
{
        memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
        return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
{
        void **ptr, *addr;

        ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
                        dev_to_node(dev));
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        addr = memremap(offset, size, flags);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
                return ERR_PTR(-ENXIO);
        }

        return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
        WARN_ON(devres_release(dev, devm_memremap_release,
                                devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
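
/*
 * Example (illustrative sketch; "pdev" and "res" are hypothetical): the
 * devres-managed variant ties the mapping's lifetime to the device, so a
 * probe routine needs no explicit memunmap() on its error or unbind paths.
 *
 *      void *base = devm_memremap(&pdev->dev, res->start,
 *                                 resource_size(res), MEMREMAP_WB);
 *      if (IS_ERR(base))
 *              return PTR_ERR(base);
 *      ...the mapping is released automatically at driver detach...
 */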
#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
        struct resource res;
        struct percpu_ref *ref;
        struct dev_pagemap pgmap;
        struct vmem_altmap altmap;
};

static void pgmap_radix_release(struct resource *res)
{
        resource_size_t key, align_start, align_size, align_end;

        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
        align_end = align_start + align_size - 1;

        mutex_lock(&pgmap_lock);
        for (key = res->start; key <= res->end; key += SECTION_SIZE)
                radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
        mutex_unlock(&pgmap_lock);
}

static unsigned long pfn_first(struct page_map *page_map)
{
        struct dev_pagemap *pgmap = &page_map->pgmap;
        const struct resource *res = &page_map->res;
        struct vmem_altmap *altmap = pgmap->altmap;
        unsigned long pfn;

        pfn = res->start >> PAGE_SHIFT;
        if (altmap)
                pfn += vmem_altmap_offset(altmap);
        return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
        const struct resource *res = &page_map->res;

        return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
        for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)
static void devm_memremap_pages_release(struct device *dev, void *data)
{
        struct page_map *page_map = data;
        struct resource *res = &page_map->res;
        resource_size_t align_start, align_size;
        struct dev_pagemap *pgmap = &page_map->pgmap;
        unsigned long pfn;

        for_each_device_pfn(pfn, page_map)
                put_page(pfn_to_page(pfn));

        if (percpu_ref_tryget_live(pgmap->ref)) {
                dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
                percpu_ref_put(pgmap->ref);
        }

        /* pages are dead and unused, undo the arch mapping */
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);

        mem_hotplug_begin();
        arch_remove_memory(align_start, align_size);
        mem_hotplug_done();

        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
        pgmap_radix_release(res);
        dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
                        "%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
        struct page_map *page_map;

        WARN_ON_ONCE(!rcu_read_lock_held());

        page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
        return page_map ? &page_map->pgmap : NULL;
}
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event). The expected order of events is that @ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
                struct percpu_ref *ref, struct vmem_altmap *altmap)
{
        resource_size_t key, align_start, align_size, align_end;
        pgprot_t pgprot = PAGE_KERNEL;
        struct dev_pagemap *pgmap;
        struct page_map *page_map;
        int error, nid, is_ram;
        unsigned long pfn;

        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
                - align_start;
        is_ram = region_intersects(align_start, align_size,
                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "%s attempted on mixed region %pr\n",
                                __func__, res);
                return ERR_PTR(-ENXIO);
        }

        if (is_ram == REGION_INTERSECTS)
                return __va(res->start);

        if (!ref)
                return ERR_PTR(-EINVAL);

        page_map = devres_alloc_node(devm_memremap_pages_release,
                        sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
        if (!page_map)
                return ERR_PTR(-ENOMEM);
        pgmap = &page_map->pgmap;

        memcpy(&page_map->res, res, sizeof(*res));

        pgmap->dev = dev;
        if (altmap) {
                memcpy(&page_map->altmap, altmap, sizeof(*altmap));
                pgmap->altmap = &page_map->altmap;
        }
        pgmap->ref = ref;
        pgmap->res = &page_map->res;

        mutex_lock(&pgmap_lock);
        error = 0;
        align_end = align_start + align_size - 1;
        for (key = align_start; key <= align_end; key += SECTION_SIZE) {
                struct dev_pagemap *dup;

                rcu_read_lock();
                dup = find_dev_pagemap(key);
                rcu_read_unlock();
                if (dup) {
                        dev_err(dev, "%s: %pr collides with mapping for %s\n",
                                        __func__, res, dev_name(dup->dev));
                        error = -EBUSY;
                        break;
                }
                error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
                                page_map);
                if (error) {
                        dev_err(dev, "%s: failed: %d\n", __func__, error);
                        break;
                }
        }
        mutex_unlock(&pgmap_lock);
        if (error)
                goto err_radix;

        nid = dev_to_node(dev);
        if (nid < 0)
                nid = numa_mem_id();

        error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
                        align_size);
        if (error)
                goto err_pfn_remap;

        mem_hotplug_begin();
        error = arch_add_memory(nid, align_start, align_size, false);
        if (!error)
                move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                        align_start >> PAGE_SHIFT,
                                        align_size >> PAGE_SHIFT);
        mem_hotplug_done();
        if (error)
                goto err_add_memory;

        for_each_device_pfn(pfn, page_map) {
                struct page *page = pfn_to_page(pfn);

                /*
                 * ZONE_DEVICE pages union ->lru with a ->pgmap back
                 * pointer.  It is a bug if a ZONE_DEVICE page is ever
                 * freed or placed on a driver-private list.  Seed the
                 * storage with LIST_POISON* values.
                 */
                list_del(&page->lru);
                page->pgmap = pgmap;
                percpu_ref_get(ref);
        }
        devres_add(dev, page_map);
        return __va(res->start);

 err_add_memory:
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
        pgmap_radix_release(res);
        devres_free(page_map);
        return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
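
/*
 * Example (illustrative sketch; "drv", "drv_ref_release" and "ref_done" are
 * hypothetical): one possible percpu_ref lifecycle around
 * devm_memremap_pages(), following the ordering in the notes above.
 *
 *      setup (e.g. at probe):
 *              init_completion(&drv->ref_done);
 *              error = percpu_ref_init(&drv->ref, drv_ref_release, 0,
 *                              GFP_KERNEL);
 *              if (error)
 *                      return error;
 *              drv->base = devm_memremap_pages(dev, &drv->res, &drv->ref, NULL);
 *              if (IS_ERR(drv->base))
 *                      return PTR_ERR(drv->base);
 *
 *      teardown:
 *              percpu_ref_kill(&drv->ref);         before the devm release event
 *              ...devm_memremap_pages_release() drops the page references...
 *              wait_for_completion(&drv->ref_done);  completed by drv_ref_release()
 *              percpu_ref_exit(&drv->ref);
 */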
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
        /* number of pfns from base where pfn_to_page() is valid */
        return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
        altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
        /*
         * 'memmap_start' is the virtual address for the first "struct
         * page" in this range of the vmemmap array.  In the case of
         * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
         * pointer arithmetic, so we can perform this to_vmem_altmap()
         * conversion without concern for the initialization state of
         * the struct page fields.
         */
        struct page *page = (struct page *) memmap_start;
        struct dev_pagemap *pgmap;

        /*
         * Unconditionally retrieve a dev_pagemap associated with the
         * given physical address, this is only for use in the
         * arch_{add|remove}_memory() for setting up and tearing down
         * the memmap.
         */
        rcu_read_lock();
        pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
        rcu_read_unlock();

        return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */