arch/metag/mm/mmu-meta2.c
/*
 * Copyright (C) 2008,2009,2010,2011 Imagination Technologies Ltd.
 *
 * Meta 2 enhanced mode MMU handling code.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/syscore_ops.h>

#include <asm/mmu.h>
#include <asm/mmu_context.h>
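/*
 * Look up the first level (pgd) entry for @vaddr by decoding this
 * hardware thread's MMCU_TnLocal_TABLE_PHYS0 register, which gives the
 * linear base and size limit of the mapped window (see the decode
 * below). Returns 0 when the address falls outside that window.
 */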
unsigned long mmu_read_first_level_page(unsigned long vaddr)
{
	unsigned int cpu = hard_processor_id();
	unsigned long offset, linear_base, linear_limit;
	unsigned int phys0;
	pgd_t *pgd, entry;

	if (is_global_space(vaddr))
		vaddr &= ~0x80000000;

	offset = vaddr >> PGDIR_SHIFT;

	phys0 = metag_in32(mmu_phys0_addr(cpu));

	/* Top bit of linear base is always zero. */
	linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff;

	/* Limit in the range 0 (4MB) to 9 (2GB). */
	linear_limit = 1 << ((phys0 >> 8) & 0xf);
	linear_limit += linear_base;

	/*
	 * If offset is below linear base or above the limit then no
	 * mapping exists.
	 */
	if (offset < linear_base || offset > linear_limit)
		return 0;

	offset -= linear_base;
	pgd = (pgd_t *)mmu_get_base();
	entry = pgd[offset];

	return pgd_val(entry);
}
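/*
 * Read the second level (pte) entry mapping @vaddr. In enhanced MMU mode
 * the CACHERD builtin fetches the entry for us, so no software walk of
 * the second level table is needed.
 */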
unsigned long mmu_read_second_level_page(unsigned long vaddr)
{
	return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK));
}
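/*
 * Return the virtual address of this hardware thread's top level page
 * table: a per-thread stride into the LINSYSMEM table window, plus the
 * offset programmed into bits 18:2 of MMCU_TnLocal_TABLE_PHYS1.
 */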
unsigned long mmu_get_base(void)
{
	unsigned int cpu = hard_processor_id();
	unsigned long stride;

	stride = cpu * LINSYSMEMTnX_STRIDE;

	/*
	 * Bits 18:2 of the MMCU_TnLocal_TABLE_PHYS1 register should be
	 * used as an offset to the start of the top-level pgd table.
	 */
	stride += (metag_in32(mmu_phys1_addr(cpu)) & 0x7fffc);

	if (is_global_space(PAGE_OFFSET))
		stride += LINSYSMEMTXG_OFFSET;

	return LINSYSMEMT0L_BASE + stride;
}
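/*
 * A first level entry points at a 64 byte aligned second level table
 * (hence the 0xffffffc0 mask); a second level entry holds a 4K aligned
 * page frame address (hence the 0xfffff000 mask).
 */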
#define FIRST_LEVEL_MASK	0xffffffc0
#define SECOND_LEVEL_MASK	0xfffff000
#define SECOND_LEVEL_ALIGN	64
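/*
 * For each of the four hardware threads, check both the local and global
 * table mappings and force the priv bit on any present entry that lacks
 * it.
 */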
static void repriv_mmu_tables(void)
{
	unsigned long phys0_addr;
	unsigned int g;

	/*
	 * Check that all the mmu table regions are priv protected, and if not
	 * fix them and emit a warning. If we left them without priv protection
	 * then userland processes would have access to a 2M window into
	 * physical memory near where the page tables are.
	 */
	phys0_addr = MMCU_T0LOCAL_TABLE_PHYS0;
	for (g = 0; g < 2; ++g) {
		unsigned int t, phys0;
		unsigned long flags;
		for (t = 0; t < 4; ++t) {
			__global_lock2(flags);
			phys0 = metag_in32(phys0_addr);
			if ((phys0 & _PAGE_PRESENT) && !(phys0 & _PAGE_PRIV)) {
				pr_warn("Fixing priv protection on T%d %s MMU table region\n",
					t,
					g ? "global" : "local");
				phys0 |= _PAGE_PRIV;
				metag_out32(phys0, phys0_addr);
			}
			__global_unlock2(flags);

			phys0_addr += MMCU_TnX_TABLE_PHYSX_STRIDE;
		}

		phys0_addr += MMCU_TXG_TABLE_PHYSX_OFFSET
			    - 4*MMCU_TnX_TABLE_PHYSX_STRIDE;
	}
}
#ifdef CONFIG_METAG_SUSPEND_MEM
static void mmu_resume(void)
{
	/*
	 * If a full suspend to RAM has happened then the original bad MMU table
	 * priv may have been restored, so repriv them again.
	 */
	repriv_mmu_tables();
}
#else
#define mmu_resume NULL
#endif	/* CONFIG_METAG_SUSPEND_MEM */
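/*
 * Only a resume handler is needed: after a resume from suspend to RAM the
 * table priv bits may have reverted, so they are fixed up again.
 */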
static struct syscore_ops mmu_syscore_ops = {
	.resume = mmu_resume,
};
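/*
 * Early MMU setup: mirror the hardware's existing kernel mappings into
 * swapper_pg_dir (so all processes inherit them), optionally remap the
 * kernel with 4MB pages, and priv protect the MMU table regions.
 */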
void __init mmu_init(unsigned long mem_end)
{
	unsigned long entry, addr;
	pgd_t *p_swapper_pg_dir;
#ifdef CONFIG_KERNEL_4M_PAGES
	unsigned long mem_size = mem_end - PAGE_OFFSET;
	unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22);
	unsigned int second_level_entry = 0;
	unsigned long *second_level_table;
#endif

	/*
	 * Now copy any MMU pgd entries already in the mmu page tables
	 * over to our root init process (swapper_pg_dir) map. This map is
	 * then inherited by all other processes, which means all processes
	 * inherit a map of the kernel space.
	 */
	addr = META_MEMORY_BASE;
	entry = pgd_index(META_MEMORY_BASE);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) {
		unsigned long pgd_entry;
		/* copy over the current MMU value */
		pgd_entry = mmu_read_first_level_page(addr);
		pgd_val(*p_swapper_pg_dir) = pgd_entry;

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
		entry++;
	}
#ifdef CONFIG_KERNEL_4M_PAGES
	/*
	 * At this point we can also map the kernel with 4MB pages to
	 * reduce TLB pressure.
	 */
	second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages);

	addr = PAGE_OFFSET;
	entry = pgd_index(PAGE_OFFSET);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (pages > 0) {
		unsigned long phys_addr, second_level_phys;
		pte_t *pte = (pte_t *)&second_level_table[second_level_entry];

		phys_addr = __pa(addr);

		second_level_phys = __pa(pte);

		pgd_val(*p_swapper_pg_dir) = ((second_level_phys &
					       FIRST_LEVEL_MASK) |
					      _PAGE_SZ_4M |
					      _PAGE_PRESENT);

		pte_val(*pte) = ((phys_addr & SECOND_LEVEL_MASK) |
				 _PAGE_PRESENT | _PAGE_DIRTY |
				 _PAGE_ACCESSED | _PAGE_WRITE |
				 _PAGE_CACHEABLE | _PAGE_KERNEL);

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
		/* Second level pages must be 64byte aligned. */
		second_level_entry += (SECOND_LEVEL_ALIGN /
				       sizeof(unsigned long));
		pages--;
	}
	load_pgd(swapper_pg_dir, hard_processor_id());
	flush_tlb_all();
#endif
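	/*
	 * Make sure the MMU table regions are priv protected before any
	 * userland process runs, and keep them that way across suspend.
	 */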
	repriv_mmu_tables();
	register_syscore_ops(&mmu_syscore_ops);
}