// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG
#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}
/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_page_to_ram(page);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
	7+2, /* PGD, PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
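/*
 * Each page on ptable_list is carved into equal-sized tables: 512-byte
 * pointer tables (TABLE_PGD/TABLE_PMD) or 256-byte page tables
 * (TABLE_PTE), i.e. 8 or 16 tables per 4 KiB page.  PD_MARKBITS() keeps
 * one bit per slot in the otherwise unused page->index field; a set bit
 * marks a free slot.
 */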
void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));

	return;
}
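/*
 * Allocate one table of the given type.  A slot is taken from the page
 * at the head of ptable_list when it still has free slots; otherwise a
 * fresh uncached page is set up, queued on the list, and its first slot
 * is returned.
 */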
void *get_pointer_table(int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		if (type == TABLE_PTE) {
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS for not having
			 * SMP.
			 */
			pgtable_pte_page_ctor(virt_to_page(page));
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}
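/*
 * Give a table back to its page.  Returns 1 when this also freed the
 * whole backing page (all slots became free again), 0 otherwise.
 */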
int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		if (type == TABLE_PTE)
			pgtable_pte_page_dtor(virt_to_page(page));
		free_page (page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}
/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;
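/*
 * Hand out PTRS_PER_PTE-entry kernel page tables for the early mapping.
 * Tables are carved consecutively out of a page obtained from memblock;
 * a fresh (uncached) page is allocated only once the current one has
 * been used up.
 */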
static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (PAGE_ALIGNED(last_pte_table)) {
		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
					__func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}
static pmd_t *last_pmd_table __initdata = NULL;
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * pointer tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (PAGE_ALIGNED(last_pmd_table)) {
		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}
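/*
 * Map one memory chunk (m68k_memory[node]) into the kernel virtual
 * address space.  On 020/030, whole blocks are mapped with "early
 * termination" root/pointer descriptors where possible; otherwise
 * regular page tables are built via kernel_ptr_table() and
 * kernel_page_table().
 */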
static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
}
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif
	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}
	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i);
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);
	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}