// SPDX-License-Identifier: GPL-2.0
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>

#ifdef CONFIG_MMU

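/*
 * Set via the "early_ioremap_debug" boot parameter; when enabled, every
 * early mapping and unmapping emits a diagnostic line to the kernel log.
 */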
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static int after_paging_init __initdata;

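/*
 * Weak default that leaves the protections untouched; architectures may
 * override it to adjust the protections used for early_memremap() (x86,
 * for instance, uses this hook to apply memory-encryption attributes).
 */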
pgprot_t __init __weak
early_memremap_pgprot_adjust(resource_size_t phys_addr,
			     unsigned long size,
			     pgprot_t prot)
{
	return prot;
}

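/*
 * Weak hook invoked from early_ioremap_reset(); architectures can
 * override it to tear down their early fixmap state.
 */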
void __init __weak early_ioremap_shutdown(void)
{
}

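/*
 * Called once the architecture has switched to its real page tables;
 * from this point on, mappings go through __late_set_fixmap() and
 * __late_clear_fixmap() instead of the early fixmap helpers.
 */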
void __init early_ioremap_reset(void)
{
	early_ioremap_shutdown();
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif

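/*
 * Per-slot bookkeeping for the FIX_BTMAPS area: the mapped address
 * handed out, the size that was requested, and the fixed virtual
 * address of each slot.
 */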
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

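/*
 * Precompute the virtual address of each boot-time mapping slot and
 * warn if any slot is already in use this early.
 */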
void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}

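/*
 * Late initcall: by this point every early mapping should have been
 * torn down, so any remaining prev_map entry is a leak worth warning
 * about.
 */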
static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;

	return 0;
}
late_initcall(check_early_ioremap_leak);

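/*
 * Map @size bytes at @phys_addr into a free boot-time fixmap slot with
 * the given protections.  The mapping is page-aligned internally; the
 * returned pointer has the sub-page offset added back, so callers may
 * pass arbitrary physical addresses.
 */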
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%08llx, %08lx) not found slot\n",
		 __func__, (u64)phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

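/*
 * Tear down a mapping returned by __early_ioremap().  The address and
 * size must match the original request exactly.
 */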
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n",
		 addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
		 addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
	     addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

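/*
 * Typical boot-time usage (a sketch, not code from this file): map a
 * device, access it with the normal MMIO accessors, then unmap with the
 * same size that was requested:
 *
 *	void __iomem *base = early_ioremap(phys, len);
 *	if (base) {
 *		u32 val = readl(base);
 *		early_iounmap(base, len);
 *	}
 */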
/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_NORMAL);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}

#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_RO);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif

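/*
 * Variant taking raw protection bits; only available on architectures
 * that select CONFIG_ARCH_USE_MEMREMAP_PROT.
 */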
#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       __pgprot(prot_val));
}
#endif

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

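/*
 * Copy from physical memory that is not (yet) permanently mapped,
 * chunking the copy so each temporary mapping fits within the
 * NR_FIX_BTMAPS pages of a single slot.
 */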
void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = offset_in_page(src);
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
}

#else /* CONFIG_MMU */

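/*
 * Without an MMU there is nothing to map: physical addresses are handed
 * back directly and unmapping is a no-op.
 */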
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

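/*
 * Counterpart to early_memremap(): casts the plain pointer back to
 * __iomem and forwards to early_iounmap().
 */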
void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}