// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}
static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}
/*
 * The Motorola 680x0 user's manual recommends using uncached memory for
 * address translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */
void mmu_page_ctor(void *page)
{
	__flush_pages_to_ram(page, 1);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}
void mmu_page_dtor(void *page)
{
	cache_page(page);
}
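/*
 * mmu_page_ctor() runs when a page is drafted to hold translation
 * tables: flush it from cache and ATC, then mark it noncacheable per
 * the recommendation in the comment above. mmu_page_dtor() reverses
 * that before the page goes back to the allocator.
 */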
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */
typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
};
#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
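/*
 * The bookkeeping for a page full of tables lives in otherwise-unused
 * fields of its struct page: page->lru links the page into
 * ptable_list[] above, and page->index holds the bitmap of table slots
 * within the page (bit set = slot free).
 */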
static const int ptable_shift[2] = {
	7+2, /* PGD, PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
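/*
 * Worked example, assuming 4 KiB pages: a PGD/PMD pointer table is
 * 1 << (7+2) = 512 bytes, so 8 of them fit in a page and ptable_mask()
 * is 0xff; a PTE table is 1 << (6+2) = 256 bytes, so 16 fit and
 * ptable_mask() is 0xffff.
 */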
void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));
}
void *get_pointer_table(int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		if (type == TABLE_PTE) {
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS for not having
			 * SMP.
			 */
			pagetable_pte_ctor(virt_to_ptdesc(page));
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}
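/*
 * Note: callers normally reach get/free_pointer_table() through the
 * pgalloc wrappers in arch/m68k/include/asm/motorola_pgalloc.h, e.g.
 * pte_alloc_one_kernel() allocating with get_pointer_table(TABLE_PTE).
 */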
int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		if (type == TABLE_PTE)
			pagetable_pte_dtor(virt_to_ptdesc((void *)page));
		free_page (page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}
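/*
 * free_pointer_table() returns 1 when the last table in the page was
 * freed and the backing page itself could be released, 0 while other
 * tables in the page remain in use.
 */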
/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;
static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (PAGE_ALIGNED(last_pte_table)) {
		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}
static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * pointer tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (PAGE_ALIGNED(last_pmd_table)) {
		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}
static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;
	int i;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
#ifdef DEBUG
				printk ("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
}
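/*
 * The "early termination" cases above use the 68020/030 MMU's early
 * termination descriptors: a root- or pointer-table slot holds a page
 * descriptor directly, so a single entry maps a whole PGDIR_SIZE or
 * PMD_SIZE region contiguously and no lower-level tables are needed
 * for that range.
 */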
/*
 * Alternate definitions that are compile time constants, for
 * initializing protection_map. The cachebits are fixed later.
 */
#define PAGE_NONE_C	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE_C,
	[VM_READ]					= PAGE_READONLY_C,
	[VM_WRITE]					= PAGE_COPY_C,
	[VM_WRITE | VM_READ]				= PAGE_COPY_C,
	[VM_EXEC]					= PAGE_READONLY_C,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_C,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_C,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_C,
	[VM_SHARED]					= PAGE_NONE_C,
	[VM_SHARED | VM_READ]				= PAGE_READONLY_C,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED_C,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_C
};
DECLARE_VM_GET_PAGE_PROT
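/*
 * Note the PAGE_COPY_C entries above: a private (non-VM_SHARED)
 * writable mapping starts out read-only (_PAGE_RONLY), so the first
 * store faults and triggers copy-on-write; only VM_SHARED | VM_WRITE
 * mappings get a directly writable descriptor.
 */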
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		mm_cachebits = _PAGE_CACHE040;
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}
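	/*
	 * This is the "fixed later" step promised above protection_map:
	 * the PAGE_*_C initializers must be compile-time constants, so
	 * the runtime-detected 040/060 cache bits are OR'd into all 16
	 * entries here instead.
	 */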
	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size - 1;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
				  MEMBLOCK_NONE);
		addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr) + 1;

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	early_memtest(min_addr, max_addr);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fc(USER_DATA);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}