/* mm/early_ioremap.c */
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>

#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
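
/*
 * Flipped on by early_ioremap_reset(), which architectures call once
 * paging_init() is done; from that point on, new mappings go through the
 * "late" fixmap hooks below.
 */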
static int after_paging_init __initdata;

void __init __weak early_ioremap_shutdown(void)
{
}

void __init early_ioremap_reset(void)
{
	early_ioremap_shutdown();
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
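/*
 * A minimal sketch of such an override, assuming the architecture already
 * provides a __set_fixmap() helper (hypothetical, not any in-tree
 * definition):
 *
 *	#define __late_set_fixmap(idx, phys, prot) __set_fixmap(idx, phys, prot)
 *	#define __late_clear_fixmap(idx)	   __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
 */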
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
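
/*
 * Bootstrap-mapping bookkeeping: prev_map[] holds the virtual address handed
 * back for each slot (NULL while the slot is free), prev_size[] records the
 * size the caller asked for, and slot_virt[] caches the fixed virtual base
 * of each slot.
 */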
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%08llx, %08lx) not found slot\n",
		 __func__, (u64)phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
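	/*
	 * With the generic fixmap layout, higher indices sit at lower virtual
	 * addresses, so decrementing idx walks the slot's pages in ascending
	 * virtual-address order.
	 */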
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n",
		 addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
		 addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
	     addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;
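
	/*
	 * Recover the sub-page offset that __early_ioremap() folded into the
	 * returned pointer, then round offset + size up to whole pages.
	 */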
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       FIXMAP_PAGE_NORMAL);
}

#else /* CONFIG_MMU */

void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}
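
/*
 * Usage sketch (hypothetical caller, not in-tree; example_read_boot_table(),
 * parse_table(), table_phys and TABLE_SIZE are made-up names): a boot-time
 * consumer maps a physical range before ioremap() is usable, reads it, and
 * unmaps it again so the slot returns to the pool:
 *
 *	void __init example_read_boot_table(phys_addr_t table_phys)
 *	{
 *		void *table = early_memremap(table_phys, TABLE_SIZE);
 *
 *		if (table) {
 *			parse_table(table);
 *			early_memunmap(table, TABLE_SIZE);
 *		}
 *	}
 *
 * Every mapping must be torn down with early_iounmap()/early_memunmap()
 * before late_initcall time, or check_early_ioremap_leak() will warn.
 */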