// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
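
/*
 * True for addresses in the kernel vmalloc/kmap region; such accesses are
 * resolved through init_mm rather than the faulting task's mm.
 */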
#define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))
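
/*
 * Context (ASID) management state, modelled on the ppc implementation:
 * a bitmap of contexts in use, the next context number to try, and a
 * back-pointer from each context to the mm that currently owns it.
 */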
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
	if (!next_pgtable)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, PAGE_SIZE);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
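
	/*
	 * Build the kernel's linear mapping of RAM at PAGE_OFFSET: one
	 * page-table page is consumed per PGD slot and filled with PTEs
	 * covering successive physical pages.
	 */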
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);

			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}
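
/*
 * TLB miss handler.  The faulting address is read from the MMUAR register
 * for data misses, or computed from the faulting PC for instruction misses.
 * We walk the page tables of the appropriate mm (init_mm for kernel kmap
 * addresses, otherwise the current task's mm) and, if a valid mapping
 * exists, load it into the hardware TLB via the MMUTR/MMUDR/MMUOR
 * registers.  Returns 0 on success, or -1 so the caller can fall back to
 * the normal page-fault path.
 */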
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	p4d = p4d_offset(pgd, mmuar);
	if (p4d_none(*p4d)) {
		local_irq_restore(flags);
		return -1;
	}

	pud = pud_offset(p4d, mmuar);
	if (pud_none(*pud)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pud, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));
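
	/*
	 * Build the TLB entry: MMUTR carries the virtual page number, ASID
	 * and valid bit; MMUDR carries the physical page number, protection
	 * bits and the 8KB page size.  Writing MMUOR then loads the entry
	 * into the data or instruction TLB.
	 */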
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}
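
/*
 * Register the DRAM range with memblock, reserve the memory already used
 * by the kernel image and early allocations, and record the pfn limits
 * that paging_init() relies on.
 */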
void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];