// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008,2009,2010,2011 Imagination Technologies Ltd.
 *
 * Meta 2 enhanced mode MMU handling code.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/syscore_ops.h>

#include <asm/mmu.h>
#include <asm/mmu_context.h>
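/*
 * Read the first level (pgd) entry that maps vaddr, as the MMU sees it.
 * The hardware only backs a linear window of first level entries, whose
 * base and size are described by the per-thread TABLE_PHYS0 register;
 * addresses outside that window have no first level entry at all.
 */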
unsigned long mmu_read_first_level_page(unsigned long vaddr)
{
	unsigned int cpu = hard_processor_id();
	unsigned long offset, linear_base, linear_limit;
	unsigned int phys0;
	pgd_t *pgd, entry;

	if (is_global_space(vaddr))
		vaddr &= ~0x80000000;

	offset = vaddr >> PGDIR_SHIFT;

	phys0 = metag_in32(mmu_phys0_addr(cpu));

	/* Top bit of linear base is always zero. */
	linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff;

	/* Limit in the range 0 (4MB) to 9 (2GB). */
	linear_limit = 1 << ((phys0 >> 8) & 0xf);
	linear_limit += linear_base;
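	/*
	 * Worked example with a hypothetical phys0 value, assuming 4MB
	 * first level entries (PGDIR_SHIFT == 22): phys0 == 0x40000300
	 * gives linear_base == 0x100 and linear_limit == 0x100 + (1 << 3)
	 * == 0x108, so only offsets 0x100..0x108 pass the range check
	 * below.
	 */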
	/*
	 * If offset is below linear base or above the limit then no
	 * mapping exists.
	 */
	if (offset < linear_base || offset > linear_limit)
		return 0;

	offset -= linear_base;
	pgd = (pgd_t *)mmu_get_base();
	entry = pgd[offset];

	return pgd_val(entry);
}
unsigned long mmu_read_second_level_page(unsigned long vaddr)
{
	return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK));
}
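/*
 * Return the base address of this thread's top level pgd table in the
 * linear system memory region. Each hardware thread has its own table,
 * hence the per-cpu stride, and the global variant of the region is
 * used when the kernel lives in global space.
 */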
unsigned long mmu_get_base(void)
{
	unsigned int cpu = hard_processor_id();
	unsigned long stride;

	stride = cpu * LINSYSMEMTnX_STRIDE;

	/*
	 * Bits 18:2 of the MMCU_TnLocal_TABLE_PHYS1 register should be
	 * used as an offset to the start of the top-level pgd table.
	 */
	stride += (metag_in32(mmu_phys1_addr(cpu)) & 0x7fffc);

	if (is_global_space(PAGE_OFFSET))
		stride += LINSYSMEMTXG_OFFSET;

	return LINSYSMEMT0L_BASE + stride;
}
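/*
 * First level entries hold a 64 byte aligned second level table pointer
 * (bits 31:6), second level entries hold a 4kB aligned page frame
 * address (bits 31:12); the low bits of each are flag bits.
 */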
#define FIRST_LEVEL_MASK	0xffffffc0
#define SECOND_LEVEL_MASK	0xfffff000
#define SECOND_LEVEL_ALIGN	64
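/*
 * The PHYS0 registers are updated with a read-modify-write sequence, so
 * each one is done under __global_lock2() to keep the update atomic
 * with respect to the other hardware threads.
 */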
static void repriv_mmu_tables(void)
{
	unsigned long phys0_addr;
	unsigned int g;

	/*
	 * Check that all the mmu table regions are priv protected, and if not
	 * fix them and emit a warning. If we left them without priv protection
	 * then userland processes would have access to a 2M window into
	 * physical memory near where the page tables are.
	 */
	phys0_addr = MMCU_T0LOCAL_TABLE_PHYS0;

	for (g = 0; g < 2; ++g) {
		unsigned int t, phys0;
		unsigned long flags;

		for (t = 0; t < 4; ++t) {
			__global_lock2(flags);
			phys0 = metag_in32(phys0_addr);
			if ((phys0 & _PAGE_PRESENT) && !(phys0 & _PAGE_PRIV)) {
				pr_warn("Fixing priv protection on T%d %s MMU table region\n",
					t,
					g ? "global" : "local");
				phys0 |= _PAGE_PRIV;
				metag_out32(phys0, phys0_addr);
			}
			__global_unlock2(flags);

			phys0_addr += MMCU_TnX_TABLE_PHYSX_STRIDE;
		}

		phys0_addr += MMCU_TXG_TABLE_PHYSX_OFFSET
			    - 4*MMCU_TnX_TABLE_PHYSX_STRIDE;
	}
}
#ifdef CONFIG_METAG_SUSPEND_MEM
static void mmu_resume(void)
{
	/*
	 * If a full suspend to RAM has happened then the original bad MMU
	 * table priv may have been restored, so repriv them again.
	 */
	repriv_mmu_tables();
}
#else
#define mmu_resume NULL
#endif	/* CONFIG_METAG_SUSPEND_MEM */
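/*
 * Re-check the table priv protection on resume; when suspend to RAM is
 * not configured mmu_resume is NULL and the hook is a no-op.
 */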
static struct syscore_ops mmu_syscore_ops = {
	.resume = mmu_resume,
};
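/*
 * Initialise the kernel's view of the MMU tables: seed swapper_pg_dir
 * with the pgd entries already live in the MMU, optionally remap the
 * kernel with 4MB pages, and make sure the table regions are priv
 * protected.
 */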
void __init mmu_init(unsigned long mem_end)
{
	unsigned long entry, addr;
	pgd_t *p_swapper_pg_dir;
#ifdef CONFIG_KERNEL_4M_PAGES
	unsigned long mem_size = mem_end - PAGE_OFFSET;
	unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22);
	unsigned int second_level_entry = 0;
	unsigned long *second_level_table;
#endif
	/*
	 * Now copy over any MMU pgd entries already in the mmu page tables
	 * over to our root init process (swapper_pg_dir) map. This map is
	 * then inherited by all other processes, which means all processes
	 * inherit a map of the kernel space.
	 */
	addr = META_MEMORY_BASE;
	entry = pgd_index(META_MEMORY_BASE);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) {
		unsigned long pgd_entry;

		/* copy over the current MMU value */
		pgd_entry = mmu_read_first_level_page(addr);
		pgd_val(*p_swapper_pg_dir) = pgd_entry;

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
		entry++;
	}
#ifdef CONFIG_KERNEL_4M_PAGES
	/*
	 * At this point we can also map the kernel with 4MB pages to
	 * reduce TLB pressure.
	 */
	second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages);
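	/*
	 * The allocation above provides one 64 byte second level chunk per
	 * 4MB page; e.g. (hypothetical) 64MB of memory gives pages == 16
	 * and a 1kB bootmem allocation.
	 */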
	addr = PAGE_OFFSET;
	entry = pgd_index(PAGE_OFFSET);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;
	while (pages > 0) {
		unsigned long phys_addr, second_level_phys;
		pte_t *pte = (pte_t *)&second_level_table[second_level_entry];

		phys_addr = __pa(addr);
		second_level_phys = __pa(pte);

		pgd_val(*p_swapper_pg_dir) = ((second_level_phys &
					       FIRST_LEVEL_MASK) |
					      _PAGE_SZ_4M |
					      _PAGE_PRESENT);

		pte_val(*pte) = ((phys_addr & SECOND_LEVEL_MASK) |
				 _PAGE_PRESENT | _PAGE_DIRTY |
				 _PAGE_ACCESSED | _PAGE_WRITE |
				 _PAGE_CACHEABLE | _PAGE_KERNEL);

		p_swapper_pg_dir++;
		addr += 1 << 22;
		entry++;
		pages--;

		/* Second level pages must be 64byte aligned. */
		second_level_entry += (SECOND_LEVEL_ALIGN /
				       sizeof(unsigned long));
	}
	load_pgd(swapper_pg_dir, hard_processor_id());
#endif

	repriv_mmu_tables();
	register_syscore_ops(&mmu_syscore_ops);
}