/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* size of memory already mapped in head.S */
#define INIT_MAPPED_SIZE	(4UL<<20)

extern unsigned long availmem;
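
/*
 * Allocate a page for use as a kernel page table: clear it, push it
 * out to RAM, flush its TLB entry and mark it non-cacheable. The MMU
 * table-walk hardware reads page tables straight from memory (on the
 * '040/'060 it does not snoop the data cache), so table pages must
 * not be cached.
 */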
static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}

static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;
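
/*
 * Allocate a pointer (pmd-level) table. These tables are much smaller
 * than a page, so several of them are packed into each page: the space
 * left over in the last page used by head.S is reused first, and a
 * fresh non-cached page is allocated only when the current one fills
 * up.
 */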
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}
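
/*
 * Map one physical memory chunk into the kernel address space. On the
 * 020/030, suitably aligned regions are mapped with "early termination"
 * descriptors: a single root-table entry covers 32MB (ROOTTREESIZE) and
 * a single pointer-table entry covers 256KB (PTRTREESIZE), so no page
 * tables are needed for them. Everything else, including all memory on
 * the 040/060, is mapped with individual page table entries.
 */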
static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;

				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct mem_info));
			continue;
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
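	/*
	 * Choose the node shift so that the whole physical range
	 * divides into at most 2^6 = 64 slices for the fixed-size
	 * virtual-to-node lookup table.
	 */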
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_low_pfn = max_addr >> PAGE_SHIFT;
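
	/*
	 * Register each memory chunk with the bootmem allocator; the
	 * allocator's bitmap for a node is carved out of availmem,
	 * i.e. out of the memory head.S has already mapped for us.
	 */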
	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
		availmem = PAGE_ALIGN(availmem);
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem,
			  min(INIT_MAPPED_SIZE, size) - (availmem - addr));
	map_node(0);
	if (size > INIT_MAPPED_SIZE)
		free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE,
				  size - INIT_MAPPED_SIZE);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();

	/*
	 * Allocate and clear the zero page that ZERO_PAGE() refers to;
	 * it backs read faults on uninitialized anonymous mappings.
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, pg_data_map + i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
	}
}

extern char __init_begin, __init_end;
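
/*
 * Return the pages holding the __init sections to the page allocator
 * once boot-time initialization code and data are no longer needed.
 */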
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)&__init_begin;
	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
}