/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
/* size of memory already mapped in head.S */
#define INIT_MAPPED_SIZE	(4UL<<20)

extern unsigned long availmem;
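/*
 * Allocate one page from bootmem for use as a kernel page table.
 * The page is flushed from the data cache and marked noncached, so
 * the '040/'060 does not pick up page descriptors through the
 * copyback data cache.
 */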
static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}
static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;
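/*
 * Allocate a pointer (pmd) table.  Pointer tables are packed several
 * per page, so slots are handed out from the current page (starting
 * with the space head.S left unused) and a fresh bootmem page is only
 * taken once that page is full.
 */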
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}
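/*
 * Map one memory chunk into the kernel virtual address space.  On
 * 020/030 this uses early-termination descriptors (32MB at root level,
 * 256KB at pointer level) wherever alignment and size allow; on
 * 040/060 ordinary 4KB page tables are built instead.
 */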
static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;
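	/*
	 * From here on physaddr doubles as the descriptor template: its
	 * low bits carry the cache mode and protection flags, so it can
	 * be written into a descriptor as-is and simply advanced while
	 * the loop walks through the chunk.
	 */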
	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);
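		/*
		 * 020/030: fill one pointer-table slot per 256KB block
		 * (early termination).  Virtual address 0 instead gets a
		 * real page table below, so the very first page can be
		 * left unmapped.
		 */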
		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
}
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif
	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		mm_cachebits = _PAGE_CACHE040;
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}
	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct mem_info));
			continue;
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
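	/*
	 * min_addr/max_addr now bound all accepted chunks.  m68k_memoffset
	 * is the constant offset used for virt<->phys translation, and the
	 * shift is chosen so that (max_addr - min_addr) >> shift is at
	 * most 64, keeping the virtual-address-to-node lookup table small.
	 */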
	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_low_pfn = max_addr >> PAGE_SHIFT;
	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
		availmem = PAGE_ALIGN(availmem);
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}
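	/*
	 * init_bootmem_node() places each node's bootmem bitmap at
	 * availmem and returns its size, so availmem keeps pointing just
	 * past the last early allocation.
	 */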
	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem,
			  min(INIT_MAPPED_SIZE, size) - (availmem - addr));
	map_node(0);
	if (size > INIT_MAPPED_SIZE)
		free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE,
				  size - INIT_MAPPED_SIZE);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();
	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);
	}
}
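/*
 * Hand the pages covering the __init sections back to the page
 * allocator once they are no longer needed.
 */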
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
}