/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *		  MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);
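
/*
 * Illustrative usage sketch (assuming a caller-owned "struct resource *res"
 * that describes a memory range with no I/O side effects; the resource
 * pointer and the error code are assumptions for the example, not part of
 * this file): map the range write-back and access the result like ordinary
 * memory, with no readl()/writel() accessors.
 *
 *	void *buf = memremap(res->start, resource_size(res), MEMREMAP_WB);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	memunmap(buf);
 */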

void memunmap(void *addr)
{
	/* Pointers into the direct map (the MEMREMAP_WB fast path) need no teardown */
	if (is_ioremap_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);
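
/*
 * Illustrative usage sketch (assuming a platform driver probe() with a
 * "pdev" whose first MEM resource is plain memory rather than MMIO): the
 * device-managed variant ties the mapping's lifetime to the device, so no
 * explicit memunmap() is needed on the error or unbind paths.
 *
 *	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	void *base;
 *
 *	if (!res)
 *		return -ENODEV;
 *	base = devm_memremap(&pdev->dev, res->start, resource_size(res),
 *			     MEMREMAP_WB);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */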

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);