// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <mm/mmu_decl.h>
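
/*
 * ioremap() support for 32-bit powerpc.  Before the vmalloc system is up,
 * mappings are carved out of the space below IOREMAP_TOP (ioremap_bot
 * tracks how far down we have gone); afterwards the generic vmalloc-based
 * path is used.  Ranges already covered by BAT block mappings are returned
 * directly and never unmapped.
 */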

void __iomem *ioremap_wt(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);

	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);
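
/*
 * Hypothetical usage sketch (not part of this file): a driver maps a
 * device range write-through with ioremap_wt(), uses the normal MMIO
 * accessors, and tears the mapping down again with iounmap():
 *
 *	void __iomem *regs = ioremap_wt(res->start, resource_size(res));
 *
 *	if (regs) {
 *		writel(0x1, regs);
 *		iounmap(regs);
 *	}
 *
 * "res" stands in for the driver's struct resource.
 */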

void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
{
	unsigned long v;
	phys_addr_t p, offset;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from IOREMAP_TOP
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;		/* page-aligned physical base */
	offset = addr & ~PAGE_MASK;	/* offset of addr within its page */
	size = PAGE_ALIGN(addr + size) - p;
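
	/*
	 * Worked example (illustrative only): with 4 KiB pages,
	 * addr = 0xfe001234 and size = 0x100 give
	 *	p      = 0xfe001000
	 *	offset = 0x234
	 *	size   = PAGE_ALIGN(0xfe001334) - 0xfe001000 = 0x1000
	 * i.e. the mapping is grown to whole pages and the caller gets back
	 * a pointer offset by 0x234 into the first mapped page.
	 */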

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16 * 1024 * 1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
	    page_is_ram(__phys_to_pfn(p))) {
		pr_warn("%s(): phys addr 0x%llx is RAM lr %ps\n", __func__,
			(unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * mapping.
	 */
	v = p_block_mapped(p);
	if (v)
		return (void __iomem *)v + offset;
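
	/*
	 * Not covered by a block mapping.  Once the slab allocator (and with
	 * it vmalloc) is available, hand off to the common vmalloc-backed
	 * ioremap path; only the early-boot case is handled by hand below.
	 */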

	if (slab_is_available())
		return do_ioremap(p, offset, size, prot, caller);

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */
	pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);
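
	/*
	 * Early-boot path: vmalloc is not up yet, so take the mapping out of
	 * the top of the ioremap area instead.  The range
	 * [ioremap_bot - size, ioremap_bot) is mapped directly and, on
	 * success, ioremap_bot is moved down past it.
	 */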

	err = early_ioremap_range(ioremap_bot - size, p, size, prot);
	if (err)
		return NULL;
	ioremap_bot -= size;

	return (void __iomem *)ioremap_bot + offset;
}

void iounmap(volatile void __iomem *addr)
{
	/*
	 * If mapped by BATs then there is nothing to do.
	 * Calling vfree() generates a benign warning.
	 */
	if (v_block_mapped((unsigned long)addr))
		return;
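
	/*
	 * Only addresses that lie inside the ioremap region are torn down:
	 * anything at or below high_memory is part of the linear mapping and
	 * was never a vmalloc area, and anything at or above ioremap_bot was
	 * set up by the early-boot path above and has no vm_struct behind it,
	 * so vunmap() must not be called in either case.
	 */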

	if (addr > high_memory && (unsigned long)addr < ioremap_bot)
		vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);