1 // SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/vmalloc.h>
#include <asm/io-workarounds.h>
/*
 * Bottom of the ioremap allocation area; consumed by the arch ioremap
 * implementation. Exported so modules performing ioremap can reference it.
 */
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
12 void __iomem
*ioremap(phys_addr_t addr
, unsigned long size
)
14 pgprot_t prot
= pgprot_noncached(PAGE_KERNEL
);
15 void *caller
= __builtin_return_address(0);
18 return iowa_ioremap(addr
, size
, prot
, caller
);
19 return __ioremap_caller(addr
, size
, prot
, caller
);
21 EXPORT_SYMBOL(ioremap
);
23 void __iomem
*ioremap_wc(phys_addr_t addr
, unsigned long size
)
25 pgprot_t prot
= pgprot_noncached_wc(PAGE_KERNEL
);
26 void *caller
= __builtin_return_address(0);
29 return iowa_ioremap(addr
, size
, prot
, caller
);
30 return __ioremap_caller(addr
, size
, prot
, caller
);
32 EXPORT_SYMBOL(ioremap_wc
);
34 void __iomem
*ioremap_coherent(phys_addr_t addr
, unsigned long size
)
36 pgprot_t prot
= pgprot_cached(PAGE_KERNEL
);
37 void *caller
= __builtin_return_address(0);
40 return iowa_ioremap(addr
, size
, prot
, caller
);
41 return __ioremap_caller(addr
, size
, prot
, caller
);
44 void __iomem
*ioremap_prot(phys_addr_t addr
, unsigned long size
, unsigned long flags
)
46 pte_t pte
= __pte(flags
);
47 void *caller
= __builtin_return_address(0);
49 /* writeable implies dirty for kernel addresses */
51 pte
= pte_mkdirty(pte
);
53 /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
54 pte
= pte_exprotect(pte
);
55 pte
= pte_mkprivileged(pte
);
58 return iowa_ioremap(addr
, size
, pte_pgprot(pte
), caller
);
59 return __ioremap_caller(addr
, size
, pte_pgprot(pte
), caller
);
61 EXPORT_SYMBOL(ioremap_prot
);
63 int early_ioremap_range(unsigned long ea
, phys_addr_t pa
,
64 unsigned long size
, pgprot_t prot
)
68 for (i
= 0; i
< size
; i
+= PAGE_SIZE
) {
69 int err
= map_kernel_page(ea
+ i
, pa
+ i
, prot
);
71 if (WARN_ON_ONCE(err
)) /* Should clean up */
78 void __iomem
*do_ioremap(phys_addr_t pa
, phys_addr_t offset
, unsigned long size
,
79 pgprot_t prot
, void *caller
)
81 struct vm_struct
*area
;
85 area
= __get_vm_area_caller(size
, VM_IOREMAP
, IOREMAP_START
, IOREMAP_END
, caller
);
90 va
= (unsigned long)area
->addr
;
92 ret
= ioremap_page_range(va
, va
+ size
, pa
, prot
);
94 return (void __iomem
*)area
->addr
+ offset
;
96 unmap_kernel_range(va
, size
);
#ifdef CONFIG_ZONE_DEVICE
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
	unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;

	/* Radix translation needs no extra alignment beyond the generic one */
	if (radix_enabled())
		return SUBSECTION_SIZE;
	return max(SUBSECTION_SIZE, 1UL << shift);
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif